/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

int __intel_ring_space(int head, int tail, int size)
{
        int space = head - tail;
        if (space <= 0)
                space += size;
        return space - I915_RING_FREE_SPACE;
}
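
/*
 * Worked example (illustrative values only): with head == 0x100,
 * tail == 0x180 and size == 0x1000, space == 0x100 - 0x180 == -0x80,
 * which wraps to -0x80 + 0x1000 == 0xf80 bytes between tail and head.
 * I915_RING_FREE_SPACE bytes of that are held in reserve so the tail
 * never quite catches up with the head, since tail == head would read
 * back as an empty ring.
 */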

void intel_ring_update_space(struct intel_ring *ring)
{
        if (ring->last_retired_head != -1) {
                ring->head = ring->last_retired_head;
                ring->last_retired_head = -1;
        }

        ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
                                         ring->tail, ring->size);
}

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
        struct intel_ring *ring = req->ring;
        u32 cmd;
        int ret;

        cmd = MI_FLUSH;

        if (mode & EMIT_INVALIDATE)
                cmd |= MI_READ_FLUSH;

        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}
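
/*
 * All the ring emitters in this file follow the same protocol:
 * intel_ring_begin() reserves a fixed number of dwords (and handles
 * wrapping and waiting for space), intel_ring_emit() writes exactly
 * that many dwords, and intel_ring_advance() publishes the new tail.
 * Odd-length packets are padded with MI_NOOP, as above, to keep the
 * ring tail qword aligned as the hardware requires.
 */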

static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
        struct intel_ring *ring = req->ring;
        u32 cmd;
        int ret;

        /*
         * read/write caches:
         *
         * I915_GEM_DOMAIN_RENDER is always invalidated, but is
         * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
         * also flushed at 2d versus 3d pipeline switches.
         *
         * read-only caches:
         *
         * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
         * MI_READ_FLUSH is set, and is always flushed on 965.
         *
         * I915_GEM_DOMAIN_COMMAND may not exist?
         *
         * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
         * invalidated when MI_EXE_FLUSH is set.
         *
         * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
         * invalidated with every MI_FLUSH.
         *
         * TLBs:
         *
         * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
         * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
         * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
         * are flushed at any MI_FLUSH.
         */

        cmd = MI_FLUSH;
        if (mode & EMIT_INVALIDATE) {
                cmd |= MI_EXE_FLUSH;
                if (IS_G4X(req->i915) || IS_GEN5(req->i915))
                        cmd |= MI_INVALIDATE_ISP;
        }

        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
        struct intel_ring *ring = req->ring;
        u32 scratch_addr =
                i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
        int ret;

        ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, 0); /* low dword */
        intel_ring_emit(ring, 0); /* high dword */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
        struct intel_ring *ring = req->ring;
        u32 scratch_addr =
                i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
        u32 flags = 0;
        int ret;

        /* Force SNB workarounds for PIPE_CONTROL flushes */
        ret = intel_emit_post_sync_nonzero_flush(req);
        if (ret)
                return ret;

        /* Just flush everything.  Experiments have shown that reducing the
         * number of bits based on the write domains has little performance
         * impact.
         */
        if (mode & EMIT_FLUSH) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                /*
                 * Ensure that any following seqno writes only happen
                 * when the render cache is indeed flushed.
                 */
                flags |= PIPE_CONTROL_CS_STALL;
        }
        if (mode & EMIT_INVALIDATE) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
                /*
                 * TLB invalidate requires a post-sync write.
                 */
                flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
        }

        ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
        intel_ring_emit(ring, flags);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
        struct intel_ring *ring = req->ring;
        int ret;

        ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
        intel_ring_emit(ring,
                        PIPE_CONTROL_CS_STALL |
                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        return 0;
}

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
        struct intel_ring *ring = req->ring;
        u32 scratch_addr =
                i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
        u32 flags = 0;
        int ret;

        /*
         * Ensure that any following seqno writes only happen when the render
         * cache is indeed flushed.
         *
         * Workaround: 4th PIPE_CONTROL command (except the ones with only
         * read-cache invalidate bits set) must have the CS_STALL bit set. We
         * don't try to be clever and just set it unconditionally.
         */
        flags |= PIPE_CONTROL_CS_STALL;

        /* Just flush everything.  Experiments have shown that reducing the
         * number of bits based on the write domains has little performance
         * impact.
         */
        if (mode & EMIT_FLUSH) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
        if (mode & EMIT_INVALIDATE) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
                /*
                 * TLB invalidate requires a post-sync write.
                 */
                flags |= PIPE_CONTROL_QW_WRITE;
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

                flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

                /* Workaround: we must issue a pipe_control with CS-stall bit
                 * set before a pipe_control command that has the state cache
                 * invalidate bit set. */
                ret = gen7_render_ring_cs_stall_wa(req);
                if (ret)
                        return ret;
        }

        ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
        intel_ring_emit(ring, flags);
        intel_ring_emit(ring, scratch_addr);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        return 0;
}

static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
                       u32 flags, u32 scratch_addr)
{
        struct intel_ring *ring = req->ring;
        int ret;

        ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
        intel_ring_emit(ring, flags);
        intel_ring_emit(ring, scratch_addr);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        return 0;
}

static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
        u32 scratch_addr =
                i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
        u32 flags = 0;
        int ret;

        flags |= PIPE_CONTROL_CS_STALL;

        if (mode & EMIT_FLUSH) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
        if (mode & EMIT_INVALIDATE) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_QW_WRITE;
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

                /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
                ret = gen8_emit_pipe_control(req,
                                             PIPE_CONTROL_CS_STALL |
                                             PIPE_CONTROL_STALL_AT_SCOREBOARD,
                                             0);
                if (ret)
                        return ret;
        }

        return gen8_emit_pipe_control(req, flags, scratch_addr);
}

u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u64 acthd;

        if (INTEL_GEN(dev_priv) >= 8)
                acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
                                         RING_ACTHD_UDW(engine->mmio_base));
        else if (INTEL_GEN(dev_priv) >= 4)
                acthd = I915_READ(RING_ACTHD(engine->mmio_base));
        else
                acthd = I915_READ(ACTHD);

        return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u32 addr;

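        /*
         * HWS_PGA takes the page-aligned physical address of the status
         * page in bits 31:12. The shift/mask below folds bits 32-35 of
         * the bus address into bits 4-7 of the register (presumably the
         * address-extension field on gen4+, which can address above 4GiB).
         */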
        addr = dev_priv->status_page_dmah->busaddr;
        if (INTEL_GEN(dev_priv) >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
}

static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        i915_reg_t mmio;

        /* The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
        if (IS_GEN7(dev_priv)) {
                switch (engine->id) {
                case RCS:
                        mmio = RENDER_HWS_PGA_GEN7;
                        break;
                case BCS:
                        mmio = BLT_HWS_PGA_GEN7;
                        break;
                /*
                 * VCS2 doesn't actually exist on Gen7; this case only
                 * silences gcc's switch-check warning.
                 */
                case VCS2:
                case VCS:
                        mmio = BSD_HWS_PGA_GEN7;
                        break;
                case VECS:
                        mmio = VEBOX_HWS_PGA_GEN7;
                        break;
                }
        } else if (IS_GEN6(dev_priv)) {
                mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
        } else {
                /* XXX: gen8 returns to sanity */
                mmio = RING_HWS_PGA(engine->mmio_base);
        }

        I915_WRITE(mmio, engine->status_page.ggtt_offset);
        POSTING_READ(mmio);

        /*
         * Flush the TLB for this page
         *
         * FIXME: These two bits have disappeared on gen8, so a question
         * arises: do we still need this and if so how should we go about
         * invalidating the TLB?
         */
        if (IS_GEN(dev_priv, 6, 7)) {
                i915_reg_t reg = RING_INSTPM(engine->mmio_base);

                /* ring should be idle before issuing a sync flush */
                WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

                I915_WRITE(reg,
                           _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
                                              INSTPM_SYNC_FLUSH));
                if (intel_wait_for_register(dev_priv,
                                            reg, INSTPM_SYNC_FLUSH, 0,
                                            1000))
                        DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
                                  engine->name);
        }
}

static bool stop_ring(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        if (INTEL_GEN(dev_priv) > 2) {
                I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
                if (intel_wait_for_register(dev_priv,
                                            RING_MI_MODE(engine->mmio_base),
                                            MODE_IDLE,
                                            MODE_IDLE,
                                            1000)) {
                        DRM_ERROR("%s: timed out trying to stop ring\n",
                                  engine->name);
                        /* Sometimes we observe that the idle flag is not
                         * set even though the ring is empty. So double
                         * check before giving up.
                         */
                        if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
                                return false;
                }
        }

        I915_WRITE_CTL(engine, 0);
        I915_WRITE_HEAD(engine, 0);
        I915_WRITE_TAIL(engine, 0);

        if (INTEL_GEN(dev_priv) > 2) {
                (void)I915_READ_CTL(engine);
                I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
        }

        return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}

static int init_ring_common(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct intel_ring *ring = engine->buffer;
        int ret = 0;

        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

        if (!stop_ring(engine)) {
                /* G45 ring initialization often fails to reset head to zero */
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
                              engine->name,
                              I915_READ_CTL(engine),
                              I915_READ_HEAD(engine),
                              I915_READ_TAIL(engine),
                              I915_READ_START(engine));

                if (!stop_ring(engine)) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
                                  engine->name,
                                  I915_READ_CTL(engine),
                                  I915_READ_HEAD(engine),
                                  I915_READ_TAIL(engine),
                                  I915_READ_START(engine));
                        ret = -EIO;
                        goto out;
                }
        }

        if (HWS_NEEDS_PHYSICAL(dev_priv))
                ring_setup_phys_status_page(engine);
        else
                intel_ring_setup_status_page(engine);

        intel_engine_reset_breadcrumbs(engine);

        /* Enforce ordering by reading HEAD register back */
        I915_READ_HEAD(engine);

        /* Initialize the ring. This must happen _after_ we've cleared the ring
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
        I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));

        /* WaClearRingBufHeadRegAtInit:ctg,elk */
        if (I915_READ_HEAD(engine))
                DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
                          engine->name, I915_READ_HEAD(engine));

        intel_ring_update_space(ring);
        I915_WRITE_HEAD(engine, ring->head);
        I915_WRITE_TAIL(engine, ring->tail);
        (void)I915_READ_TAIL(engine);

        I915_WRITE_CTL(engine,
                        ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);

        /* If the head is still not zero, the ring is dead */
        if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
                                       RING_VALID, RING_VALID,
                                       50)) {
                DRM_ERROR("%s initialization failed "
                          "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
                          engine->name,
                          I915_READ_CTL(engine),
                          I915_READ_CTL(engine) & RING_VALID,
                          I915_READ_HEAD(engine), ring->head,
                          I915_READ_TAIL(engine), ring->tail,
                          I915_READ_START(engine),
                          i915_ggtt_offset(ring->vma));
                ret = -EIO;
                goto out;
        }

        intel_engine_init_hangcheck(engine);

out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

        return ret;
}

static void reset_ring_common(struct intel_engine_cs *engine,
                              struct drm_i915_gem_request *request)
{
        struct intel_ring *ring = request->ring;

        ring->head = request->postfix;
        ring->last_retired_head = -1;
}

static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
        struct intel_ring *ring = req->ring;
        struct i915_workarounds *w = &req->i915->workarounds;
        int ret, i;

        if (w->count == 0)
                return 0;

        ret = req->engine->emit_flush(req, EMIT_BARRIER);
        if (ret)
                return ret;

        ret = intel_ring_begin(req, (w->count * 2 + 2));
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
        for (i = 0; i < w->count; i++) {
                intel_ring_emit_reg(ring, w->reg[i].addr);
                intel_ring_emit(ring, w->reg[i].value);
        }
        intel_ring_emit(ring, MI_NOOP);

        intel_ring_advance(ring);

        ret = req->engine->emit_flush(req, EMIT_BARRIER);
        if (ret)
                return ret;

        DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

        return 0;
}

static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
        int ret;

        ret = intel_ring_workarounds_emit(req);
        if (ret != 0)
                return ret;

        ret = i915_gem_render_state_init(req);
        if (ret)
                return ret;

        return 0;
}

static int wa_add(struct drm_i915_private *dev_priv,
                  i915_reg_t addr,
                  const u32 mask, const u32 val)
{
        const u32 idx = dev_priv->workarounds.count;

        if (WARN_ON(idx >= I915_MAX_WA_REGS))
                return -ENOSPC;

        dev_priv->workarounds.reg[idx].addr = addr;
        dev_priv->workarounds.reg[idx].value = val;
        dev_priv->workarounds.reg[idx].mask = mask;

        dev_priv->workarounds.count++;

        return 0;
}

#define WA_REG(addr, mask, val) do { \
                const int r = wa_add(dev_priv, (addr), (mask), (val)); \
                if (r) \
                        return r; \
        } while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
        WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
        WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
        WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
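
/*
 * Example: WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE) queues a
 * write of _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE), i.e.
 * (bit << 16) | bit, since masked registers take a write-enable mask in
 * their upper 16 bits. The queued writes are later replayed by
 * intel_ring_workarounds_emit() via MI_LOAD_REGISTER_IMM.
 */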

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
                                 i915_reg_t reg)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct i915_workarounds *wa = &dev_priv->workarounds;
        const uint32_t index = wa->hw_whitelist_count[engine->id];

        if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
                return -EINVAL;

        WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
                 i915_mmio_reg_offset(reg));
        wa->hw_whitelist_count[engine->id]++;

        return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

        /* WaDisableAsyncFlipPerfMode:bdw,chv */
        WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

        /* WaDisablePartialInstShootdown:bdw,chv */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

        /* Use Force Non-Coherent whenever executing a 3D context. This is a
         * workaround for a possible hang in the unlikely event a TLB
         * invalidation occurs during a PSD flush.
         */
        /* WaForceEnableNonCoherent:bdw,chv */
        /* WaHdcDisableFetchWhenMasked:bdw,chv */
        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          HDC_DONOT_FETCH_MEM_WHEN_MASKED |
                          HDC_FORCE_NON_COHERENT);

        /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
         * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
         *  polygons in the same 8x4 pixel/sample area to be processed without
         *  stalling waiting for the earlier ones to write to Hierarchical Z
         *  buffer."
         *
         * This optimization is off by default for BDW and CHV; turn it on.
         */
        WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

        /* Wa4x4STCOptimizationDisable:bdw,chv */
        WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

        /*
         * BSpec recommends 8x4 when MSAA is used,
         * however in practice 16x4 seems fastest.
         *
         * Note that PS/WM thread counts depend on the WIZ hashing
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        WA_SET_FIELD_MASKED(GEN7_GT_MODE,
                            GEN6_WIZ_HASHING_MASK,
                            GEN6_WIZ_HASHING_16x4);

        return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen8_init_workarounds(engine);
        if (ret)
                return ret;

        /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

        /* WaDisableDopClockGating:bdw */
        WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
                          DOP_CLOCK_GATING_DISABLE);

        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                          GEN8_SAMPLER_POWER_BYPASS_DIS);

        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          /* WaForceContextSaveRestoreNonCoherent:bdw */
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                          /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
                          (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

        return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen8_init_workarounds(engine);
        if (ret)
                return ret;

        /* WaDisableThreadStallDopClockGating:chv */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

        /* Improve HiZ throughput on CHV. */
        WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

        return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
        I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

        /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
        I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
                   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

        /* WaDisableKillLogic:bxt,skl,kbl */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
                   ECOCHK_DIS_TLB);

        /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
        /* WaDisablePartialInstShootdown:skl,bxt,kbl */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          FLOW_CONTROL_ENABLE |
                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

        /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                          GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

        /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
        if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
            IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                                  GEN9_DG_MIRROR_FIX_ENABLE);

        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
        if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
            IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
                                  GEN9_RHWO_OPTIMIZATION_DISABLE);
                /*
                 * The WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14]
                 * to be set, but we do that in the per-ctx batchbuffer as
                 * there is an issue with this register not getting restored
                 * on ctx restore.
                 */
        }

        /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
        /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
        WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
                          GEN9_ENABLE_YV12_BUGFIX |
                          GEN9_ENABLE_GPGPU_PREEMPTION);

        /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
        /* WaDisablePartialResolveInVc:skl,bxt,kbl */
        WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
                                         GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

        /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
        WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                          GEN9_CCS_TLB_PREFETCH_ENABLE);

        /* WaDisableMaskBasedCammingInRCC:skl,bxt */
        if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
            IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
                                  PIXEL_MASK_CAMMING_DISABLE);

        /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                          HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

        /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
         * both tied to WaForceContextSaveRestoreNonCoherent
         * in some HSDs for skl. We keep the tie for all gen9. The
         * documentation is a bit hazy and so we want common behaviour,
         * even though there is no clear evidence we would need both on kbl/bxt.
         * This area has been a source of system hangs, so we play it safe
         * and mimic skl regardless of what bspec says.
         *
         * Use Force Non-Coherent whenever executing a 3D context. This
         * is a workaround for a possible hang in the unlikely event
         * a TLB invalidation occurs during a PSD flush.
         */

        /* WaForceEnableNonCoherent:skl,bxt,kbl */
        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          HDC_FORCE_NON_COHERENT);

        /* WaDisableHDCInvalidation:skl,bxt,kbl */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
                   BDW_DISABLE_HDC_INVALIDATION);

        /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
        if (IS_SKYLAKE(dev_priv) ||
            IS_KABYLAKE(dev_priv) ||
            IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
                WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                                  GEN8_SAMPLER_POWER_BYPASS_DIS);

        /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

        /* WaOCLCoherentLineFlush:skl,bxt,kbl */
        I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
                                    GEN8_LQSC_FLUSH_COHERENT_LINES));

        /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
        ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
        if (ret)
                return ret;

        /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
        ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
        if (ret)
                return ret;

        /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
        ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
        if (ret)
                return ret;

        return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u8 vals[3] = { 0, 0, 0 };
        unsigned int i;

        for (i = 0; i < 3; i++) {
                u8 ss;

                /*
                 * Only consider slices where one, and only one, subslice has 7
                 * EUs
                 */
                if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
                        continue;

                /*
                 * subslice_7eu[i] != 0 (because of the check above) and
                 * ss_max == 4 (maximum number of subslices possible per slice)
                 *
                 * ->    0 <= ss <= 3;
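                 *
                 * e.g. subslice_7eu[i] == 0x4 (only subslice 2 has 7 EUs):
                 * ss == ffs(0x4) - 1 == 2, so vals[i] == 3 - 2 == 1.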
                 */
                ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
                vals[i] = 3 - ss;
        }

        if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
                return 0;

        /* Tune IZ hashing. See intel_device_info_runtime_init() */
        WA_SET_FIELD_MASKED(GEN7_GT_MODE,
                            GEN9_IZ_HASHING_MASK(2) |
                            GEN9_IZ_HASHING_MASK(1) |
                            GEN9_IZ_HASHING_MASK(0),
                            GEN9_IZ_HASHING(2, vals[2]) |
                            GEN9_IZ_HASHING(1, vals[1]) |
                            GEN9_IZ_HASHING(0, vals[0]));

        return 0;
}

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen9_init_workarounds(engine);
        if (ret)
                return ret;

        /*
         * The actual WA is to disable per-context preemption granularity
         * control until D0, which is the default case, so this is
         * equivalent to !WaDisablePerCtxtPreemptionGranularityControl:skl
         */
        if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
                I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
                           _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
        }

        if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
                /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
                I915_WRITE(FF_SLICE_CS_CHICKEN2,
                           _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
        }

        /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
         * involving this register should also be added to WA batch as required.
         */
        if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
                /* WaDisableLSQCROPERFforOCL:skl */
                I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
                           GEN8_LQSC_RO_PERF_DIS);

        /* WaEnableGapsTsvCreditFix:skl */
        if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
                I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
                                           GEN9_GAPS_TSV_CREDIT_DISABLE));
        }

        /* WaDisablePowerCompilerClockGating:skl */
        if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);

        /* WaBarrierPerformanceFixDisable:skl */
        if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
                                  HDC_FENCE_DEST_SLM_DISABLE |
                                  HDC_BARRIER_PERFORMANCE_DISABLE);

        /* WaDisableSbeCacheDispatchPortSharing:skl */
        if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
                WA_SET_BIT_MASKED(
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

        /* WaDisableGafsUnitClkGating:skl */
        WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaInPlaceDecompressionHang:skl */
        if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
                WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
                           GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

        /* WaDisableLSQCROPERFforOCL:skl */
        ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
        if (ret)
                return ret;

        return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen9_init_workarounds(engine);
        if (ret)
                return ret;

        /* WaStoreMultiplePTEenable:bxt */
        /* This is a requirement according to the hardware specification */
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

        /* WaSetClckGatingDisableMedia:bxt */
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
                                            ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
        }

        /* WaDisableThreadStallDopClockGating:bxt */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          STALL_DOP_GATING_DISABLE);

        /* WaDisablePooledEuLoadBalancingFix:bxt */
        if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
                WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
                                  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
        }

        /* WaDisableSbeCacheDispatchPortSharing:bxt */
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
                WA_SET_BIT_MASKED(
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
        }

        /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
        /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
        /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
        /* WaDisableLSQCROPERFforOCL:bxt */
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
                if (ret)
                        return ret;

                ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
                if (ret)
                        return ret;
        }

        /* WaProgramL3SqcReg1DefaultForPerf:bxt */
        if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
                I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
                                           L3_HIGH_PRIO_CREDITS(2));

        /* WaToEnableHwFixForPushConstHWBug:bxt */
        if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                                  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        /* WaInPlaceDecompressionHang:bxt */
        if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
                WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
                           GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

        return 0;
}

static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen9_init_workarounds(engine);
        if (ret)
                return ret;

        /* WaEnableGapsTsvCreditFix:kbl */
        I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
                                   GEN9_GAPS_TSV_CREDIT_DISABLE));

        /* WaDisableDynamicCreditSharing:kbl */
        if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
                WA_SET_BIT(GAMT_CHKN_BIT_REG,
                           GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

        /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
        if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
                                  HDC_FENCE_DEST_SLM_DISABLE);

        /* WaToEnableHwFixForPushConstHWBug:kbl */
        if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                                  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        /* WaDisableGafsUnitClkGating:kbl */
        WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaDisableSbeCacheDispatchPortSharing:kbl */
        WA_SET_BIT_MASKED(
                GEN7_HALF_SLICE_CHICKEN1,
                GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

        /* WaInPlaceDecompressionHang:kbl */
        WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

        /* WaDisableLSQCROPERFforOCL:kbl */
        ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
        if (ret)
                return ret;

        return 0;
}

int init_workarounds_ring(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        WARN_ON(engine->id != RCS);

        dev_priv->workarounds.count = 0;
        dev_priv->workarounds.hw_whitelist_count[RCS] = 0;

        if (IS_BROADWELL(dev_priv))
                return bdw_init_workarounds(engine);

        if (IS_CHERRYVIEW(dev_priv))
                return chv_init_workarounds(engine);

        if (IS_SKYLAKE(dev_priv))
                return skl_init_workarounds(engine);

        if (IS_BROXTON(dev_priv))
                return bxt_init_workarounds(engine);

        if (IS_KABYLAKE(dev_priv))
                return kbl_init_workarounds(engine);

        return 0;
}

static int init_render_ring(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret = init_ring_common(engine);
        if (ret)
                return ret;

        /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
        if (IS_GEN(dev_priv, 4, 6))
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

        /* We need to disable the AsyncFlip performance optimisations in order
         * to use MI_WAIT_FOR_EVENT within the CS. It should already be
         * programmed to '1' on all products.
         *
         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
         */
        if (IS_GEN(dev_priv, 6, 7))
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

        /* Required for the hardware to program scanline values for waiting */
        /* WaEnableFlushTlbInvalidationMode:snb */
        if (IS_GEN6(dev_priv))
                I915_WRITE(GFX_MODE,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

        /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
        if (IS_GEN7(dev_priv))
                I915_WRITE(GFX_MODE_GEN7,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
                           _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

        if (IS_GEN6(dev_priv)) {
                /* From the Sandybridge PRM, volume 1 part 3, page 24:
                 * "If this bit is set, STCunit will have LRA as replacement
                 *  policy. [...] This bit must be reset.  LRA replacement
                 *  policy is not supported."
                 */
                I915_WRITE(CACHE_MODE_0,
                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
        }

        if (IS_GEN(dev_priv, 6, 7))
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

        if (INTEL_INFO(dev_priv)->gen >= 6)
                I915_WRITE_IMR(engine, ~engine->irq_keep_mask);

        return init_workarounds_ring(engine);
}

static void render_ring_cleanup(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        i915_vma_unpin_and_release(&dev_priv->semaphore);
}

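/*
 * Each signal emitted below costs 8 dwords per target engine: a 6-dword
 * PIPE_CONTROL that stalls, flushes and writes the seqno into the
 * target's semaphore slot in the GGTT, followed by a 2-dword
 * MI_SEMAPHORE_SIGNAL; hence the (num_rings - 1) * 8 reservation.
 */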
static int gen8_rcs_signal(struct drm_i915_gem_request *req)
{
        struct intel_ring *ring = req->ring;
        struct drm_i915_private *dev_priv = req->i915;
        struct intel_engine_cs *waiter;
        enum intel_engine_id id;
        int ret, num_rings;

        num_rings = INTEL_INFO(dev_priv)->num_rings;
        ret = intel_ring_begin(req, (num_rings-1) * 8);
        if (ret)
                return ret;

        for_each_engine_id(waiter, dev_priv, id) {
                u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;

                intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
                intel_ring_emit(ring,
                                PIPE_CONTROL_GLOBAL_GTT_IVB |
                                PIPE_CONTROL_QW_WRITE |
                                PIPE_CONTROL_CS_STALL);
                intel_ring_emit(ring, lower_32_bits(gtt_offset));
                intel_ring_emit(ring, upper_32_bits(gtt_offset));
                intel_ring_emit(ring, req->fence.seqno);
                intel_ring_emit(ring, 0);
                intel_ring_emit(ring,
                                MI_SEMAPHORE_SIGNAL |
                                MI_SEMAPHORE_TARGET(waiter->hw_id));
                intel_ring_emit(ring, 0);
        }
        intel_ring_advance(ring);

        return 0;
}

static int gen8_xcs_signal(struct drm_i915_gem_request *req)
{
        struct intel_ring *ring = req->ring;
        struct drm_i915_private *dev_priv = req->i915;
        struct intel_engine_cs *waiter;
        enum intel_engine_id id;
        int ret, num_rings;

        num_rings = INTEL_INFO(dev_priv)->num_rings;
        ret = intel_ring_begin(req, (num_rings-1) * 6);
        if (ret)
                return ret;

        for_each_engine_id(waiter, dev_priv, id) {
                u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;

                intel_ring_emit(ring,
                                (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
                intel_ring_emit(ring,
                                lower_32_bits(gtt_offset) |
                                MI_FLUSH_DW_USE_GTT);
                intel_ring_emit(ring, upper_32_bits(gtt_offset));
                intel_ring_emit(ring, req->fence.seqno);
                intel_ring_emit(ring,
                                MI_SEMAPHORE_SIGNAL |
                                MI_SEMAPHORE_TARGET(waiter->hw_id));
                intel_ring_emit(ring, 0);
        }
        intel_ring_advance(ring);

        return 0;
}

static int gen6_signal(struct drm_i915_gem_request *req)
{
        struct intel_ring *ring = req->ring;
        struct drm_i915_private *dev_priv = req->i915;
        struct intel_engine_cs *engine;
        int ret, num_rings;

        num_rings = INTEL_INFO(dev_priv)->num_rings;
        ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
        if (ret)
                return ret;

        for_each_engine(engine, dev_priv) {
                i915_reg_t mbox_reg;

                if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
                        continue;

                mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
                if (i915_mmio_reg_valid(mbox_reg)) {
                        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                        intel_ring_emit_reg(ring, mbox_reg);
                        intel_ring_emit(ring, req->fence.seqno);
                }
        }

        /* If num_dwords was rounded, make sure the tail pointer is correct */
        if (num_rings % 2 == 0)
                intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
        struct drm_i915_private *dev_priv = request->i915;

        I915_WRITE_TAIL(request->engine,
                        intel_ring_offset(request->ring, request->tail));
}

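/*
 * The legacy breadcrumb: MI_STORE_DWORD_INDEX writes the request's seqno
 * into the hardware status page at I915_GEM_HWS_INDEX, then
 * MI_USER_INTERRUPT wakes any waiters. req->tail is recorded afterwards
 * so submission (see i9xx_submit_request() above) knows where the
 * request's commands end.
 */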
static int i9xx_emit_request(struct drm_i915_gem_request *req)
{
        struct intel_ring *ring = req->ring;
        int ret;

        ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, req->fence.seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        req->tail = ring->tail;

        return 0;
}

/**
 * gen6_sema_emit_request - Update the semaphore mailbox registers
 *
 * @req: request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
1408 static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
1409 {
1410         int ret;
1411
1412         ret = req->engine->semaphore.signal(req);
1413         if (ret)
1414                 return ret;
1415
1416         return i9xx_emit_request(req);
1417 }
1418
1419 static int gen8_render_emit_request(struct drm_i915_gem_request *req)
1420 {
1421         struct intel_engine_cs *engine = req->engine;
1422         struct intel_ring *ring = req->ring;
1423         int ret;
1424
1425         if (engine->semaphore.signal) {
1426                 ret = engine->semaphore.signal(req);
1427                 if (ret)
1428                         return ret;
1429         }
1430
1431         ret = intel_ring_begin(req, 8);
1432         if (ret)
1433                 return ret;
1434
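             /* The render breadcrumb is a PIPE_CONTROL: CS_STALL orders the
              * write after the preceding rendering, and QW_WRITE posts the
              * seqno to the status-page address in the global GTT.
              */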
1435         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1436         intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
1437                                PIPE_CONTROL_CS_STALL |
1438                                PIPE_CONTROL_QW_WRITE));
1439         intel_ring_emit(ring, intel_hws_seqno_address(engine));
1440         intel_ring_emit(ring, 0);
1441         intel_ring_emit(ring, i915_gem_request_get_seqno(req));
1442         /* We're thrashing one dword of HWS. */
1443         intel_ring_emit(ring, 0);
1444         intel_ring_emit(ring, MI_USER_INTERRUPT);
1445         intel_ring_emit(ring, MI_NOOP);
1446         intel_ring_advance(ring);
1447
1448         req->tail = ring->tail;
1449
1450         return 0;
1451 }
1452
1453 /**
1454  * gen8_ring_sync_to - sync the waiter to the signaller on seqno
1455  *
1456  * @req: request that is waiting
1457  * @signal: request which has, or will have, signalled
1458  *
1459  */
1460
1461 static int
1462 gen8_ring_sync_to(struct drm_i915_gem_request *req,
1463                   struct drm_i915_gem_request *signal)
1464 {
1465         struct intel_ring *ring = req->ring;
1466         struct drm_i915_private *dev_priv = req->i915;
1467         u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
1468         struct i915_hw_ppgtt *ppgtt;
1469         int ret;
1470
1471         ret = intel_ring_begin(req, 4);
1472         if (ret)
1473                 return ret;
1474
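             /* MI_SEMAPHORE_WAIT with SAD_GTE_SDD parks this engine until the
              * dword at the semaphore offset is >= the signaller's seqno.
              */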
1475         intel_ring_emit(ring,
1476                         MI_SEMAPHORE_WAIT |
1477                         MI_SEMAPHORE_GLOBAL_GTT |
1478                         MI_SEMAPHORE_SAD_GTE_SDD);
1479         intel_ring_emit(ring, signal->fence.seqno);
1480         intel_ring_emit(ring, lower_32_bits(offset));
1481         intel_ring_emit(ring, upper_32_bits(offset));
1482         intel_ring_advance(ring);
1483
1484         /* When the !RCS engines idle waiting upon a semaphore, they lose their
1485          * pagetables and we must reload them before executing the batch.
1486          * We do this on the i915_switch_context() following the wait and
1487          * before the dispatch.
1488          */
1489         ppgtt = req->ctx->ppgtt;
1490         if (ppgtt && req->engine->id != RCS)
1491                 ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
1492         return 0;
1493 }
1494
1495 static int
1496 gen6_ring_sync_to(struct drm_i915_gem_request *req,
1497                   struct drm_i915_gem_request *signal)
1498 {
1499         struct intel_ring *ring = req->ring;
1500         u32 dw1 = MI_SEMAPHORE_MBOX |
1501                   MI_SEMAPHORE_COMPARE |
1502                   MI_SEMAPHORE_REGISTER;
1503         u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
1504         int ret;
1505
1506         WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
1507
1508         ret = intel_ring_begin(req, 4);
1509         if (ret)
1510                 return ret;
1511
1512         intel_ring_emit(ring, dw1 | wait_mbox);
1513         /* Throughout all of the GEM code, seqno passed implies our current
1514          * seqno is >= the last seqno executed. However for hardware the
1515          * comparison is strictly greater than.
1516          */
1517         intel_ring_emit(ring, signal->fence.seqno - 1);
1518         intel_ring_emit(ring, 0);
1519         intel_ring_emit(ring, MI_NOOP);
1520         intel_ring_advance(ring);
1521
1522         return 0;
1523 }
1524
1525 static void
1526 gen5_seqno_barrier(struct intel_engine_cs *engine)
1527 {
1528         /* MI_STORE commands are internally buffered by the GPU and not
1529          * flushed by MI_FLUSH, SyncFlush or any other combination of
1530          * MI commands.
1531          *
1532          * "Only the submission of the store operation is guaranteed.
1533          * The write result will be complete (coherent) some time later
1534          * (this is practically a finite period but there is no guaranteed
1535          * latency)."
1536          *
1537          * Empirically, a delay of at least 75us is needed before the
1538          * seqno write is reliably visible to the CPU; sleep longer for safety.
1539          */
1540         usleep_range(125, 250);
1541 }
1542
1543 static void
1544 gen6_seqno_barrier(struct intel_engine_cs *engine)
1545 {
1546         struct drm_i915_private *dev_priv = engine->i915;
1547
1548         /* Workaround to force correct ordering between irq and seqno writes on
1549          * ivb (and maybe also on snb) by reading from a CS register (like
1550          * ACTHD) before reading the status page.
1551          *
1552          * Note that this effectively stalls the read by the time it takes to
1553          * do a memory transaction, which more or less ensures that the write
1554          * from the GPU has sufficient time to invalidate the CPU cacheline.
1555          * Alternatively we could delay the interrupt from the CS ring to give
1556          * the write time to land, but that would incur a delay after every
1557          * batch i.e. much more frequent than a delay when waiting for the
1558          * interrupt (with the same net latency).
1559          *
1560          * Also note that to prevent whole machine hangs on gen7, we have to
1561          * take the spinlock to guard against concurrent cacheline access.
1562          */
1563         spin_lock_irq(&dev_priv->uncore.lock);
1564         POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
1565         spin_unlock_irq(&dev_priv->uncore.lock);
1566 }
1567
1568 static void
1569 gen5_irq_enable(struct intel_engine_cs *engine)
1570 {
1571         gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
1572 }
1573
1574 static void
1575 gen5_irq_disable(struct intel_engine_cs *engine)
1576 {
1577         gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
1578 }
1579
1580 static void
1581 i9xx_irq_enable(struct intel_engine_cs *engine)
1582 {
1583         struct drm_i915_private *dev_priv = engine->i915;
1584
1585         dev_priv->irq_mask &= ~engine->irq_enable_mask;
1586         I915_WRITE(IMR, dev_priv->irq_mask);
1587         POSTING_READ_FW(RING_IMR(engine->mmio_base));
1588 }
1589
1590 static void
1591 i9xx_irq_disable(struct intel_engine_cs *engine)
1592 {
1593         struct drm_i915_private *dev_priv = engine->i915;
1594
1595         dev_priv->irq_mask |= engine->irq_enable_mask;
1596         I915_WRITE(IMR, dev_priv->irq_mask);
1597 }
1598
1599 static void
1600 i8xx_irq_enable(struct intel_engine_cs *engine)
1601 {
1602         struct drm_i915_private *dev_priv = engine->i915;
1603
1604         dev_priv->irq_mask &= ~engine->irq_enable_mask;
1605         I915_WRITE16(IMR, dev_priv->irq_mask);
1606         POSTING_READ16(RING_IMR(engine->mmio_base));
1607 }
1608
1609 static void
1610 i8xx_irq_disable(struct intel_engine_cs *engine)
1611 {
1612         struct drm_i915_private *dev_priv = engine->i915;
1613
1614         dev_priv->irq_mask |= engine->irq_enable_mask;
1615         I915_WRITE16(IMR, dev_priv->irq_mask);
1616 }
1617
1618 static int
1619 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1620 {
1621         struct intel_ring *ring = req->ring;
1622         int ret;
1623
1624         ret = intel_ring_begin(req, 2);
1625         if (ret)
1626                 return ret;
1627
1628         intel_ring_emit(ring, MI_FLUSH);
1629         intel_ring_emit(ring, MI_NOOP);
1630         intel_ring_advance(ring);
1631         return 0;
1632 }
1633
1634 static void
1635 gen6_irq_enable(struct intel_engine_cs *engine)
1636 {
1637         struct drm_i915_private *dev_priv = engine->i915;
1638
1639         I915_WRITE_IMR(engine,
1640                        ~(engine->irq_enable_mask |
1641                          engine->irq_keep_mask));
1642         gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1643 }
1644
1645 static void
1646 gen6_irq_disable(struct intel_engine_cs *engine)
1647 {
1648         struct drm_i915_private *dev_priv = engine->i915;
1649
1650         I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1651         gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1652 }
1653
1654 static void
1655 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1656 {
1657         struct drm_i915_private *dev_priv = engine->i915;
1658
1659         I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1660         gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
1661 }
1662
1663 static void
1664 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1665 {
1666         struct drm_i915_private *dev_priv = engine->i915;
1667
1668         I915_WRITE_IMR(engine, ~0);
1669         gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
1670 }
1671
1672 static void
1673 gen8_irq_enable(struct intel_engine_cs *engine)
1674 {
1675         struct drm_i915_private *dev_priv = engine->i915;
1676
1677         I915_WRITE_IMR(engine,
1678                        ~(engine->irq_enable_mask |
1679                          engine->irq_keep_mask));
1680         POSTING_READ_FW(RING_IMR(engine->mmio_base));
1681 }
1682
1683 static void
1684 gen8_irq_disable(struct intel_engine_cs *engine)
1685 {
1686         struct drm_i915_private *dev_priv = engine->i915;
1687
1688         I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1689 }
1690
1691 static int
1692 i965_emit_bb_start(struct drm_i915_gem_request *req,
1693                    u64 offset, u32 length,
1694                    unsigned int dispatch_flags)
1695 {
1696         struct intel_ring *ring = req->ring;
1697         int ret;
1698
1699         ret = intel_ring_begin(req, 2);
1700         if (ret)
1701                 return ret;
1702
1703         intel_ring_emit(ring,
1704                         MI_BATCH_BUFFER_START |
1705                         MI_BATCH_GTT |
1706                         (dispatch_flags & I915_DISPATCH_SECURE ?
1707                          0 : MI_BATCH_NON_SECURE_I965));
1708         intel_ring_emit(ring, offset);
1709         intel_ring_advance(ring);
1710
1711         return 0;
1712 }
1713
1714 /* Just a userspace ABI convention to limit the wa batch bo to a reasonable size */
1715 #define I830_BATCH_LIMIT (256*1024)
1716 #define I830_TLB_ENTRIES (2)
1717 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
1718 static int
1719 i830_emit_bb_start(struct drm_i915_gem_request *req,
1720                    u64 offset, u32 len,
1721                    unsigned int dispatch_flags)
1722 {
1723         struct intel_ring *ring = req->ring;
1724         u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
1725         int ret;
1726
1727         ret = intel_ring_begin(req, 6);
1728         if (ret)
1729                 return ret;
1730
1731         /* Evict the invalid PTE TLBs */
1732         intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
1733         intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
1734         intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
1735         intel_ring_emit(ring, cs_offset);
1736         intel_ring_emit(ring, 0xdeadbeef);
1737         intel_ring_emit(ring, MI_NOOP);
1738         intel_ring_advance(ring);
1739
1740         if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1741                 if (len > I830_BATCH_LIMIT)
1742                         return -ENOSPC;
1743
1744                 ret = intel_ring_begin(req, 6 + 2);
1745                 if (ret)
1746                         return ret;
1747
1748                 /* Blit the batch (which now has all relocs applied) to the
1749                  * stable batch scratch bo area (so that the CS never
1750                  * stumbles over its tlb invalidation bug) ...
1751                  */
1752                 intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
1753                 intel_ring_emit(ring,
1754                                 BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
1755                 intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
1756                 intel_ring_emit(ring, cs_offset);
1757                 intel_ring_emit(ring, 4096);
1758                 intel_ring_emit(ring, offset);
1759
1760                 intel_ring_emit(ring, MI_FLUSH);
1761                 intel_ring_emit(ring, MI_NOOP);
1762                 intel_ring_advance(ring);
1763
1764                 /* ... and execute it. */
1765                 offset = cs_offset;
1766         }
1767
1768         ret = intel_ring_begin(req, 2);
1769         if (ret)
1770                 return ret;
1771
1772         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1773         intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1774                                         0 : MI_BATCH_NON_SECURE));
1775         intel_ring_advance(ring);
1776
1777         return 0;
1778 }
1779
1780 static int
1781 i915_emit_bb_start(struct drm_i915_gem_request *req,
1782                    u64 offset, u32 len,
1783                    unsigned int dispatch_flags)
1784 {
1785         struct intel_ring *ring = req->ring;
1786         int ret;
1787
1788         ret = intel_ring_begin(req, 2);
1789         if (ret)
1790                 return ret;
1791
1792         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1793         intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1794                                         0 : MI_BATCH_NON_SECURE));
1795         intel_ring_advance(ring);
1796
1797         return 0;
1798 }
1799
1800 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
1801 {
1802         struct drm_i915_private *dev_priv = engine->i915;
1803
1804         if (!dev_priv->status_page_dmah)
1805                 return;
1806
1807         drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
1808         engine->status_page.page_addr = NULL;
1809 }
1810
1811 static void cleanup_status_page(struct intel_engine_cs *engine)
1812 {
1813         struct i915_vma *vma;
1814
1815         vma = fetch_and_zero(&engine->status_page.vma);
1816         if (!vma)
1817                 return;
1818
1819         i915_vma_unpin(vma);
1820         i915_gem_object_unpin_map(vma->obj);
1821         i915_vma_put(vma);
1822 }
1823
1824 static int init_status_page(struct intel_engine_cs *engine)
1825 {
1826         struct drm_i915_gem_object *obj;
1827         struct i915_vma *vma;
1828         unsigned int flags;
1829         int ret;
1830
1831         obj = i915_gem_object_create(&engine->i915->drm, 4096);
1832         if (IS_ERR(obj)) {
1833                 DRM_ERROR("Failed to allocate status page\n");
1834                 return PTR_ERR(obj);
1835         }
1836
1837         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1838         if (ret)
1839                 goto err;
1840
1841         vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
1842         if (IS_ERR(vma)) {
1843                 ret = PTR_ERR(vma);
1844                 goto err;
1845         }
1846
1847         flags = PIN_GLOBAL;
1848         if (!HAS_LLC(engine->i915))
1849                 /* On g33, we cannot place HWS above 256MiB, so
1850                  * restrict its pinning to the low mappable arena.
1851                  * Though this restriction is not documented for
1852                  * gen4, gen5, or byt, they also behave similarly
1853                  * and hang if the HWS is placed at the top of the
1854                  * GTT. To generalise, it appears that all !llc
1855                  * platforms have issues with us placing the HWS
1856                  * above the mappable region (even though we never
1857                  * actually map it).
1858                  */
1859                 flags |= PIN_MAPPABLE;
1860         ret = i915_vma_pin(vma, 0, 4096, flags);
1861         if (ret)
1862                 goto err;
1863
1864         engine->status_page.vma = vma;
1865         engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
1866         engine->status_page.page_addr =
1867                 i915_gem_object_pin_map(obj, I915_MAP_WB);
1868
1869         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1870                          engine->name, i915_ggtt_offset(vma));
1871         return 0;
1872
1873 err:
1874         i915_gem_object_put(obj);
1875         return ret;
1876 }
1877
1878 static int init_phys_status_page(struct intel_engine_cs *engine)
1879 {
1880         struct drm_i915_private *dev_priv = engine->i915;
1881
1882         dev_priv->status_page_dmah =
1883                 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
1884         if (!dev_priv->status_page_dmah)
1885                 return -ENOMEM;
1886
1887         engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1888         memset(engine->status_page.page_addr, 0, PAGE_SIZE);
1889
1890         return 0;
1891 }
1892
1893 int intel_ring_pin(struct intel_ring *ring)
1894 {
1895         /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1896         unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
1897         enum i915_map_type map;
1898         struct i915_vma *vma = ring->vma;
1899         void *addr;
1900         int ret;
1901
1902         GEM_BUG_ON(ring->vaddr);
1903
1904         map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
1905
1906         if (vma->obj->stolen)
1907                 flags |= PIN_MAPPABLE;
1908
1909         if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1910                 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1911                         ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1912                 else
1913                         ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1914                 if (unlikely(ret))
1915                         return ret;
1916         }
1917
1918         ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
1919         if (unlikely(ret))
1920                 return ret;
1921
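             /* Map through the mappable aperture (write-combined) when the
              * ring is map-and-fenceable; otherwise map the backing pages
              * directly with the caching mode chosen above.
              */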
1922         if (i915_vma_is_map_and_fenceable(vma))
1923                 addr = (void __force *)i915_vma_pin_iomap(vma);
1924         else
1925                 addr = i915_gem_object_pin_map(vma->obj, map);
1926         if (IS_ERR(addr))
1927                 goto err;
1928
1929         ring->vaddr = addr;
1930         return 0;
1931
1932 err:
1933         i915_vma_unpin(vma);
1934         return PTR_ERR(addr);
1935 }
1936
1937 void intel_ring_unpin(struct intel_ring *ring)
1938 {
1939         GEM_BUG_ON(!ring->vma);
1940         GEM_BUG_ON(!ring->vaddr);
1941
1942         if (i915_vma_is_map_and_fenceable(ring->vma))
1943                 i915_vma_unpin_iomap(ring->vma);
1944         else
1945                 i915_gem_object_unpin_map(ring->vma->obj);
1946         ring->vaddr = NULL;
1947
1948         i915_vma_unpin(ring->vma);
1949 }
1950
1951 static struct i915_vma *
1952 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1953 {
1954         struct i915_address_space *vm = &dev_priv->ggtt.base;
1955         struct drm_i915_gem_object *obj;
1956         struct i915_vma *vma;
1957
1958         obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
1959         if (!obj)
1960                 obj = i915_gem_object_create(&dev_priv->drm, size);
1961         if (IS_ERR(obj))
1962                 return ERR_CAST(obj);
1963
1964         /*
1965          * Mark ring buffers as read-only from GPU side (so no stray overwrites)
1966          * if supported by the platform's GGTT.
1967          */
1968         if (vm->has_read_only)
1969                 i915_gem_object_set_readonly(obj);
1970
1971         vma = i915_vma_create(obj, vm, NULL);
1972         if (IS_ERR(vma))
1973                 goto err;
1974
1975         return vma;
1976
1977 err:
1978         i915_gem_object_put(obj);
1979         return vma;
1980 }
1981
1982 struct intel_ring *
1983 intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1984 {
1985         struct intel_ring *ring;
1986         struct i915_vma *vma;
1987
1988         GEM_BUG_ON(!is_power_of_2(size));
1989
1990         ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1991         if (!ring)
1992                 return ERR_PTR(-ENOMEM);
1993
1994         ring->engine = engine;
1995
1996         INIT_LIST_HEAD(&ring->request_list);
1997
1998         ring->size = size;
1999         /* Workaround an erratum on the i830 which causes a hang if
2000          * the TAIL pointer points to within the last 2 cachelines
2001          * of the buffer.
2002          */
2003         ring->effective_size = size;
2004         if (IS_I830(engine->i915) || IS_845G(engine->i915))
2005                 ring->effective_size -= 2 * CACHELINE_BYTES;
2006
2007         ring->last_retired_head = -1;
2008         intel_ring_update_space(ring);
2009
2010         vma = intel_ring_create_vma(engine->i915, size);
2011         if (IS_ERR(vma)) {
2012                 kfree(ring);
2013                 return ERR_CAST(vma);
2014         }
2015         ring->vma = vma;
2016
2017         return ring;
2018 }
2019
2020 void
2021 intel_ring_free(struct intel_ring *ring)
2022 {
2023         i915_vma_put(ring->vma);
2024         kfree(ring);
2025 }
2026
2027 static int intel_ring_context_pin(struct i915_gem_context *ctx,
2028                                   struct intel_engine_cs *engine)
2029 {
2030         struct intel_context *ce = &ctx->engine[engine->id];
2031         int ret;
2032
2033         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
2034
2035         if (ce->pin_count++)
2036                 return 0;
2037
2038         if (ce->state) {
2039                 ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
2040                 if (ret)
2041                         goto error;
2042
2043                 ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
2044                                    PIN_GLOBAL | PIN_HIGH);
2045                 if (ret)
2046                         goto error;
2047         }
2048
2049         /* The kernel context is only used as a placeholder for flushing the
2050          * active context. It is never used for submitting user rendering and
2051          * as such never requires the golden render context, and so we can skip
2052          * emitting it when we switch to the kernel context. This is required
2053          * as during eviction we cannot allocate and pin the renderstate in
2054          * order to initialise the context.
2055          */
2056         if (ctx == ctx->i915->kernel_context)
2057                 ce->initialised = true;
2058
2059         i915_gem_context_get(ctx);
2060         return 0;
2061
2062 error:
2063         ce->pin_count = 0;
2064         return ret;
2065 }
2066
2067 static void intel_ring_context_unpin(struct i915_gem_context *ctx,
2068                                      struct intel_engine_cs *engine)
2069 {
2070         struct intel_context *ce = &ctx->engine[engine->id];
2071
2072         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
2073
2074         if (--ce->pin_count)
2075                 return;
2076
2077         if (ce->state)
2078                 i915_vma_unpin(ce->state);
2079
2080         i915_gem_context_put(ctx);
2081 }
2082
2083 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
2084 {
2085         struct drm_i915_private *dev_priv = engine->i915;
2086         struct intel_ring *ring;
2087         int ret;
2088
2089         WARN_ON(engine->buffer);
2090
2091         intel_engine_setup_common(engine);
2092
2093         memset(engine->semaphore.sync_seqno, 0,
2094                sizeof(engine->semaphore.sync_seqno));
2095
2096         ret = intel_engine_init_common(engine);
2097         if (ret)
2098                 goto error;
2099
2100         /* We may need to do things with the shrinker which
2101          * require us to immediately switch back to the default
2102          * context. This can cause a problem as pinning the
2103          * default context also requires GTT space which may not
2104          * be available. To avoid this we always pin the default
2105          * context.
2106          */
2107         ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
2108         if (ret)
2109                 goto error;
2110
2111         ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
2112         if (IS_ERR(ring)) {
2113                 ret = PTR_ERR(ring);
2114                 goto error;
2115         }
2116
2117         if (HWS_NEEDS_PHYSICAL(dev_priv)) {
2118                 WARN_ON(engine->id != RCS);
2119                 ret = init_phys_status_page(engine);
2120                 if (ret)
2121                         goto error;
2122         } else {
2123                 ret = init_status_page(engine);
2124                 if (ret)
2125                         goto error;
2126         }
2127
2128         ret = intel_ring_pin(ring);
2129         if (ret) {
2130                 intel_ring_free(ring);
2131                 goto error;
2132         }
2133         engine->buffer = ring;
2134
2135         return 0;
2136
2137 error:
2138         intel_engine_cleanup(engine);
2139         return ret;
2140 }
2141
2142 void intel_engine_cleanup(struct intel_engine_cs *engine)
2143 {
2144         struct drm_i915_private *dev_priv;
2145
2146         if (!intel_engine_initialized(engine))
2147                 return;
2148
2149         dev_priv = engine->i915;
2150
2151         if (engine->buffer) {
2152                 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
2153                         (I915_READ_MODE(engine) & MODE_IDLE) == 0);
2154
2155                 intel_ring_unpin(engine->buffer);
2156                 intel_ring_free(engine->buffer);
2157                 engine->buffer = NULL;
2158         }
2159
2160         if (engine->cleanup)
2161                 engine->cleanup(engine);
2162
2163         if (HWS_NEEDS_PHYSICAL(dev_priv)) {
2164                 WARN_ON(engine->id != RCS);
2165                 cleanup_phys_status_page(engine);
2166         } else {
2167                 cleanup_status_page(engine);
2168         }
2169
2170         intel_engine_cleanup_common(engine);
2171
2172         intel_ring_context_unpin(dev_priv->kernel_context, engine);
2173
2174         engine->i915 = NULL;
2175 }
2176
2177 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
2178 {
2179         struct intel_engine_cs *engine;
2180
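             /* The rings are idle after resume; setting head == tail marks
              * each ring empty so no stale commands are replayed.
              */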
2181         for_each_engine(engine, dev_priv) {
2182                 engine->buffer->head = engine->buffer->tail;
2183                 engine->buffer->last_retired_head = -1;
2184         }
2185 }
2186
2187 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
2188 {
2189         int ret;
2190
2191         /* Flush enough space to reduce the likelihood of waiting after
2192          * we start building the request - in which case we will just
2193          * have to repeat work.
2194          */
2195         request->reserved_space += LEGACY_REQUEST_SIZE;
2196
2197         request->ring = request->engine->buffer;
2198
2199         ret = intel_ring_begin(request, 0);
2200         if (ret)
2201                 return ret;
2202
2203         request->reserved_space -= LEGACY_REQUEST_SIZE;
2204         return 0;
2205 }
2206
2207 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
2208 {
2209         struct intel_ring *ring = req->ring;
2210         struct drm_i915_gem_request *target;
2211         int ret;
2212
2213         intel_ring_update_space(ring);
2214         if (ring->space >= bytes)
2215                 return 0;
2216
2217         /*
2218          * Space is reserved in the ringbuffer for finalising the request,
2219          * as that cannot be allowed to fail. During request finalisation,
2220          * reserved_space is set to 0 to stop the overallocation and the
2221          * assumption is that then we never need to wait (which has the
2222          * risk of failing with EINTR).
2223          *
2224          * See also i915_gem_request_alloc() and i915_add_request().
2225          */
2226         GEM_BUG_ON(!req->reserved_space);
2227
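             /* Walk the requests still on this ring, oldest first, for the
              * first whose retirement would free enough space.
              */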
2228         list_for_each_entry(target, &ring->request_list, ring_link) {
2229                 unsigned space;
2230
2231                 /* Would completion of this request free enough space? */
2232                 space = __intel_ring_space(target->postfix, ring->tail,
2233                                            ring->size);
2234                 if (space >= bytes)
2235                         break;
2236         }
2237
2238         if (WARN_ON(&target->ring_link == &ring->request_list))
2239                 return -ENOSPC;
2240
2241         ret = i915_wait_request(target,
2242                                 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
2243                                 NULL, NO_WAITBOOST);
2244         if (ret)
2245                 return ret;
2246
2247         i915_gem_request_retire_upto(target);
2248
2249         intel_ring_update_space(ring);
2250         GEM_BUG_ON(ring->space < bytes);
2251         return 0;
2252 }
2253
2254 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2255 {
2256         struct intel_ring *ring = req->ring;
2257         int remain_actual = ring->size - ring->tail;
2258         int remain_usable = ring->effective_size - ring->tail;
2259         int bytes = num_dwords * sizeof(u32);
2260         int total_bytes, wait_bytes;
2261         bool need_wrap = false;
2262
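             /* Worked example (hypothetical 4KiB ring on i830, effective_size
              * 3968): with the tail at 3950, remain_usable is 18 and
              * remain_actual is 146, so a 64-byte emit cannot fit and we must
              * wrap, waiting for 146 + total_bytes of space before
              * NOOP-padding the tail.
              */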
2263         total_bytes = bytes + req->reserved_space;
2264
2265         if (unlikely(bytes > remain_usable)) {
2266                 /*
2267                  * Not enough space for the basic request. So need to flush
2268                  * out the remainder and then wait for base + reserved.
2269                  */
2270                 wait_bytes = remain_actual + total_bytes;
2271                 need_wrap = true;
2272         } else if (unlikely(total_bytes > remain_usable)) {
2273                 /*
2274                  * The base request will fit but the reserved space
2275                  * falls off the end. So we don't need an immediate wrap
2276                  * and only need to effectively wait for the reserved
2277                  * size space from the start of ringbuffer.
2278                  */
2279                 wait_bytes = remain_actual + req->reserved_space;
2280         } else {
2281                 /* No wrapping required, just waiting. */
2282                 wait_bytes = total_bytes;
2283         }
2284
2285         if (wait_bytes > ring->space) {
2286                 int ret = wait_for_space(req, wait_bytes);
2287                 if (unlikely(ret))
2288                         return ret;
2289         }
2290
2291         if (unlikely(need_wrap)) {
2292                 GEM_BUG_ON(remain_actual > ring->space);
2293                 GEM_BUG_ON(ring->tail + remain_actual > ring->size);
2294
2295                 /* Fill the tail with MI_NOOP (which is 0, so memset suffices) */
2296                 memset(ring->vaddr + ring->tail, 0, remain_actual);
2297                 ring->tail = 0;
2298                 ring->space -= remain_actual;
2299         }
2300
2301         ring->space -= bytes;
2302         GEM_BUG_ON(ring->space < 0);
2303         return 0;
2304 }
2305
2306 /* Align the ring tail to a cacheline boundary */
2307 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
2308 {
2309         struct intel_ring *ring = req->ring;
2310         int num_dwords =
2311                 (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
2312         int ret;
2313
2314         if (num_dwords == 0)
2315                 return 0;
2316
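             /* e.g. with 64-byte cachelines and the tail at byte 52 of a
              * line (13 dwords in), we emit 16 - 13 = 3 MI_NOOPs to reach
              * the boundary.
              */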
2317         num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
2318         ret = intel_ring_begin(req, num_dwords);
2319         if (ret)
2320                 return ret;
2321
2322         while (num_dwords--)
2323                 intel_ring_emit(ring, MI_NOOP);
2324
2325         intel_ring_advance(ring);
2326
2327         return 0;
2328 }
2329
2330 static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
2331 {
2332         struct drm_i915_private *dev_priv = request->i915;
2333
2334         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2335
2336         /* Every tail move must follow the sequence below */
2337
2338         /* Disable notification that the ring is IDLE. The GT
2339          * will then assume that it is busy and bring it out of rc6.
2340          */
2341         I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2342                       _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2343
2344         /* Clear the context id. Here be magic! */
2345         I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
2346
2347         /* Wait for the ring not to be idle, i.e. for it to wake up. */
2348         if (intel_wait_for_register_fw(dev_priv,
2349                                        GEN6_BSD_SLEEP_PSMI_CONTROL,
2350                                        GEN6_BSD_SLEEP_INDICATOR,
2351                                        0,
2352                                        50))
2353                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2354
2355         /* Now that the ring is fully powered up, update the tail */
2356         i9xx_submit_request(request);
2357
2358         /* Let the ring send IDLE messages to the GT again,
2359          * and so let it sleep to conserve power when idle.
2360          */
2361         I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2362                       _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2363
2364         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2365 }
2366
2367 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
2368 {
2369         struct intel_ring *ring = req->ring;
2370         uint32_t cmd;
2371         int ret;
2372
2373         ret = intel_ring_begin(req, 4);
2374         if (ret)
2375                 return ret;
2376
2377         cmd = MI_FLUSH_DW;
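             /* On gen8+ the flush carries a 64-bit address, so the command's
              * length field (and hence the dword count) grows by one.
              */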
2378         if (INTEL_GEN(req->i915) >= 8)
2379                 cmd += 1;
2380
2381         /* We always require a command barrier so that subsequent
2382          * commands, such as breadcrumb interrupts, are strictly ordered
2383          * wrt the contents of the write cache being flushed to memory
2384          * (and thus being coherent from the CPU).
2385          */
2386         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2387
2388         /*
2389          * Bspec vol 1c.5 - video engine command streamer:
2390          * "If ENABLED, all TLBs will be invalidated once the flush
2391          * operation is complete. This bit is only valid when the
2392          * Post-Sync Operation field is a value of 1h or 3h."
2393          */
2394         if (mode & EMIT_INVALIDATE)
2395                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
2396
2397         intel_ring_emit(ring, cmd);
2398         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2399         if (INTEL_GEN(req->i915) >= 8) {
2400                 intel_ring_emit(ring, 0); /* upper addr */
2401                 intel_ring_emit(ring, 0); /* value */
2402         } else {
2403                 intel_ring_emit(ring, 0);
2404                 intel_ring_emit(ring, MI_NOOP);
2405         }
2406         intel_ring_advance(ring);
2407         return 0;
2408 }
2409
2410 static int
2411 gen8_emit_bb_start(struct drm_i915_gem_request *req,
2412                    u64 offset, u32 len,
2413                    unsigned int dispatch_flags)
2414 {
2415         struct intel_ring *ring = req->ring;
2416         bool ppgtt = USES_PPGTT(req->i915) &&
2417                         !(dispatch_flags & I915_DISPATCH_SECURE);
2418         int ret;
2419
2420         ret = intel_ring_begin(req, 4);
2421         if (ret)
2422                 return ret;
2423
2424         /* FIXME(BDW): Address space and security selectors. */
2425         intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
2426                         (dispatch_flags & I915_DISPATCH_RS ?
2427                          MI_BATCH_RESOURCE_STREAMER : 0));
2428         intel_ring_emit(ring, lower_32_bits(offset));
2429         intel_ring_emit(ring, upper_32_bits(offset));
2430         intel_ring_emit(ring, MI_NOOP);
2431         intel_ring_advance(ring);
2432
2433         return 0;
2434 }
2435
2436 static int
2437 hsw_emit_bb_start(struct drm_i915_gem_request *req,
2438                   u64 offset, u32 len,
2439                   unsigned int dispatch_flags)
2440 {
2441         struct intel_ring *ring = req->ring;
2442         int ret;
2443
2444         ret = intel_ring_begin(req, 2);
2445         if (ret)
2446                 return ret;
2447
2448         intel_ring_emit(ring,
2449                         MI_BATCH_BUFFER_START |
2450                         (dispatch_flags & I915_DISPATCH_SECURE ?
2451                          0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
2452                         (dispatch_flags & I915_DISPATCH_RS ?
2453                          MI_BATCH_RESOURCE_STREAMER : 0));
2454         /* bits 0-7 carry the length on GEN6+ */
2455         intel_ring_emit(ring, offset);
2456         intel_ring_advance(ring);
2457
2458         return 0;
2459 }
2460
2461 static int
2462 gen6_emit_bb_start(struct drm_i915_gem_request *req,
2463                    u64 offset, u32 len,
2464                    unsigned int dispatch_flags)
2465 {
2466         struct intel_ring *ring = req->ring;
2467         int ret;
2468
2469         ret = intel_ring_begin(req, 2);
2470         if (ret)
2471                 return ret;
2472
2473         intel_ring_emit(ring,
2474                         MI_BATCH_BUFFER_START |
2475                         (dispatch_flags & I915_DISPATCH_SECURE ?
2476                          0 : MI_BATCH_NON_SECURE_I965));
2477         /* bits 0-7 carry the length on GEN6+ */
2478         intel_ring_emit(ring, offset);
2479         intel_ring_advance(ring);
2480
2481         return 0;
2482 }
2483
2484 /* Blitter support (SandyBridge+) */
2485
2486 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
2487 {
2488         struct intel_ring *ring = req->ring;
2489         uint32_t cmd;
2490         int ret;
2491
2492         ret = intel_ring_begin(req, 4);
2493         if (ret)
2494                 return ret;
2495
2496         cmd = MI_FLUSH_DW;
2497         if (INTEL_GEN(req->i915) >= 8)
2498                 cmd += 1;
2499
2500         /* We always require a command barrier so that subsequent
2501          * commands, such as breadcrumb interrupts, are strictly ordered
2502          * wrt the contents of the write cache being flushed to memory
2503          * (and thus being coherent from the CPU).
2504          */
2505         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2506
2507         /*
2508          * Bspec vol 1c.3 - blitter engine command streamer:
2509          * "If ENABLED, all TLBs will be invalidated once the flush
2510          * operation is complete. This bit is only valid when the
2511          * Post-Sync Operation field is a value of 1h or 3h."
2512          */
2513         if (mode & EMIT_INVALIDATE)
2514                 cmd |= MI_INVALIDATE_TLB;
2515         intel_ring_emit(ring, cmd);
2516         intel_ring_emit(ring,
2517                         I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2518         if (INTEL_GEN(req->i915) >= 8) {
2519                 intel_ring_emit(ring, 0); /* upper addr */
2520                 intel_ring_emit(ring, 0); /* value */
2521         } else {
2522                 intel_ring_emit(ring, 0);
2523                 intel_ring_emit(ring, MI_NOOP);
2524         }
2525         intel_ring_advance(ring);
2526
2527         return 0;
2528 }
2529
2530 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
2531                                        struct intel_engine_cs *engine)
2532 {
2533         struct drm_i915_gem_object *obj;
2534         int ret, i;
2535
2536         if (!i915.semaphores)
2537                 return;
2538
2539         if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
2540                 struct i915_vma *vma;
2541
2542                 obj = i915_gem_object_create(&dev_priv->drm, 4096);
2543                 if (IS_ERR(obj))
2544                         goto err;
2545
2546                 vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
2547                 if (IS_ERR(vma))
2548                         goto err_obj;
2549
2550                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
2551                 if (ret)
2552                         goto err_obj;
2553
2554                 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
2555                 if (ret)
2556                         goto err_obj;
2557
2558                 dev_priv->semaphore = vma;
2559         }
2560
2561         if (INTEL_GEN(dev_priv) >= 8) {
2562                 u32 offset = i915_ggtt_offset(dev_priv->semaphore);
2563
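                     /* The 4KiB semaphore page is carved into per-engine-pair
                      * slots; record the GGTT address each engine must write
                      * to when signalling every other engine.
                      */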
2564                 engine->semaphore.sync_to = gen8_ring_sync_to;
2565                 engine->semaphore.signal = gen8_xcs_signal;
2566
2567                 for (i = 0; i < I915_NUM_ENGINES; i++) {
2568                         u32 ring_offset;
2569
2570                         if (i != engine->id)
2571                                 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
2572                         else
2573                                 ring_offset = MI_SEMAPHORE_SYNC_INVALID;
2574
2575                         engine->semaphore.signal_ggtt[i] = ring_offset;
2576                 }
2577         } else if (INTEL_GEN(dev_priv) >= 6) {
2578                 engine->semaphore.sync_to = gen6_ring_sync_to;
2579                 engine->semaphore.signal = gen6_signal;
2580
2581                 /*
2582                  * These semaphores are only used on pre-gen8 platforms,
2583                  * and there is no VCS2 ring on pre-gen8 hardware, so the
2584                  * semaphore between RCS and VCS2 is initialized as
2585                  * INVALID; the gen8 path above sets up the VCS2/RCS
2586                  * semaphores instead.
2587                  */
2588                 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
2589                         static const struct {
2590                                 u32 wait_mbox;
2591                                 i915_reg_t mbox_reg;
2592                         } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
2593                                 [RCS_HW] = {
2594                                         [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
2595                                         [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
2596                                         [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
2597                                 },
2598                                 [VCS_HW] = {
2599                                         [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
2600                                         [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
2601                                         [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
2602                                 },
2603                                 [BCS_HW] = {
2604                                         [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
2605                                         [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
2606                                         [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
2607                                 },
2608                                 [VECS_HW] = {
2609                                         [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
2610                                         [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
2611                                         [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
2612                                 },
2613                         };
2614                         u32 wait_mbox;
2615                         i915_reg_t mbox_reg;
2616
2617                         if (i == engine->hw_id) {
2618                                 wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
2619                                 mbox_reg = GEN6_NOSYNC;
2620                         } else {
2621                                 wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
2622                                 mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
2623                         }
2624
2625                         engine->semaphore.mbox.wait[i] = wait_mbox;
2626                         engine->semaphore.mbox.signal[i] = mbox_reg;
2627                 }
2628         }
2629
2630         return;
2631
2632 err_obj:
2633         i915_gem_object_put(obj);
2634 err:
2635         DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
2636         i915.semaphores = 0;
2637 }
2638
2639 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2640                                 struct intel_engine_cs *engine)
2641 {
2642         engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
2643
2644         if (INTEL_GEN(dev_priv) >= 8) {
2645                 engine->irq_enable = gen8_irq_enable;
2646                 engine->irq_disable = gen8_irq_disable;
2647                 engine->irq_seqno_barrier = gen6_seqno_barrier;
2648         } else if (INTEL_GEN(dev_priv) >= 6) {
2649                 engine->irq_enable = gen6_irq_enable;
2650                 engine->irq_disable = gen6_irq_disable;
2651                 engine->irq_seqno_barrier = gen6_seqno_barrier;
2652         } else if (INTEL_GEN(dev_priv) >= 5) {
2653                 engine->irq_enable = gen5_irq_enable;
2654                 engine->irq_disable = gen5_irq_disable;
2655                 engine->irq_seqno_barrier = gen5_seqno_barrier;
2656         } else if (INTEL_GEN(dev_priv) >= 3) {
2657                 engine->irq_enable = i9xx_irq_enable;
2658                 engine->irq_disable = i9xx_irq_disable;
2659         } else {
2660                 engine->irq_enable = i8xx_irq_enable;
2661                 engine->irq_disable = i8xx_irq_disable;
2662         }
2663 }
2664
2665 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2666                                       struct intel_engine_cs *engine)
2667 {
2668         intel_ring_init_irq(dev_priv, engine);
2669         intel_ring_init_semaphores(dev_priv, engine);
2670
2671         engine->init_hw = init_ring_common;
2672         engine->reset_hw = reset_ring_common;
2673
2674         engine->emit_request = i9xx_emit_request;
2675         if (i915.semaphores)
2676                 engine->emit_request = gen6_sema_emit_request;
2677         engine->submit_request = i9xx_submit_request;
2678
2679         if (INTEL_GEN(dev_priv) >= 8)
2680                 engine->emit_bb_start = gen8_emit_bb_start;
2681         else if (INTEL_GEN(dev_priv) >= 6)
2682                 engine->emit_bb_start = gen6_emit_bb_start;
2683         else if (INTEL_GEN(dev_priv) >= 4)
2684                 engine->emit_bb_start = i965_emit_bb_start;
2685         else if (IS_I830(dev_priv) || IS_845G(dev_priv))
2686                 engine->emit_bb_start = i830_emit_bb_start;
2687         else
2688                 engine->emit_bb_start = i915_emit_bb_start;
2689 }
2690
2691 int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
2692 {
2693         struct drm_i915_private *dev_priv = engine->i915;
2694         int ret;
2695
2696         intel_ring_default_vfuncs(dev_priv, engine);
2697
2698         if (HAS_L3_DPF(dev_priv))
2699                 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2700
2701         if (INTEL_GEN(dev_priv) >= 8) {
2702                 engine->init_context = intel_rcs_ctx_init;
2703                 engine->emit_request = gen8_render_emit_request;
2704                 engine->emit_flush = gen8_render_ring_flush;
2705                 if (i915.semaphores)
2706                         engine->semaphore.signal = gen8_rcs_signal;
2707         } else if (INTEL_GEN(dev_priv) >= 6) {
2708                 engine->init_context = intel_rcs_ctx_init;
2709                 engine->emit_flush = gen7_render_ring_flush;
2710                 if (IS_GEN6(dev_priv))
2711                         engine->emit_flush = gen6_render_ring_flush;
2712         } else if (IS_GEN5(dev_priv)) {
2713                 engine->emit_flush = gen4_render_ring_flush;
2714         } else {
2715                 if (INTEL_GEN(dev_priv) < 4)
2716                         engine->emit_flush = gen2_render_ring_flush;
2717                 else
2718                         engine->emit_flush = gen4_render_ring_flush;
2719                 engine->irq_enable_mask = I915_USER_INTERRUPT;
2720         }
2721
2722         if (IS_HASWELL(dev_priv))
2723                 engine->emit_bb_start = hsw_emit_bb_start;
2724
2725         engine->init_hw = init_render_ring;
2726         engine->cleanup = render_ring_cleanup;
2727
2728         ret = intel_init_ring_buffer(engine);
2729         if (ret)
2730                 return ret;
2731
2732         if (INTEL_GEN(dev_priv) >= 6) {
2733                 ret = intel_engine_create_scratch(engine, 4096);
2734                 if (ret)
2735                         return ret;
2736         } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
2737                 ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
2738                 if (ret)
2739                         return ret;
2740         }
2741
2742         return 0;
2743 }
2744
2745 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
2746 {
2747         struct drm_i915_private *dev_priv = engine->i915;
2748
2749         intel_ring_default_vfuncs(dev_priv, engine);
2750
2751         if (INTEL_GEN(dev_priv) >= 6) {
2752                 /* gen6 bsd needs a special wa for tail updates */
2753                 if (IS_GEN6(dev_priv))
2754                         engine->submit_request = gen6_bsd_submit_request;
2755                 engine->emit_flush = gen6_bsd_ring_flush;
2756                 if (INTEL_GEN(dev_priv) < 8)
2757                         engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2758         } else {
2759                 engine->mmio_base = BSD_RING_BASE;
2760                 engine->emit_flush = bsd_ring_flush;
2761                 if (IS_GEN5(dev_priv))
2762                         engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2763                 else
2764                         engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2765         }
2766
2767         return intel_init_ring_buffer(engine);
2768 }
2769
2770 /**
2771  * intel_init_bsd2_ring_buffer - Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
2772  */
2773 int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
2774 {
2775         struct drm_i915_private *dev_priv = engine->i915;
2776
2777         intel_ring_default_vfuncs(dev_priv, engine);
2778
2779         engine->emit_flush = gen6_bsd_ring_flush;
2780
2781         return intel_init_ring_buffer(engine);
2782 }
2783
2784 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
2785 {
2786         struct drm_i915_private *dev_priv = engine->i915;
2787
2788         intel_ring_default_vfuncs(dev_priv, engine);
2789
2790         engine->emit_flush = gen6_ring_flush;
2791         if (INTEL_GEN(dev_priv) < 8)
2792                 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2793
2794         return intel_init_ring_buffer(engine);
2795 }
2796
2797 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
2798 {
2799         struct drm_i915_private *dev_priv = engine->i915;
2800
2801         intel_ring_default_vfuncs(dev_priv, engine);
2802
2803         engine->emit_flush = gen6_ring_flush;
2804
2805         if (INTEL_GEN(dev_priv) < 8) {
2806                 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2807                 engine->irq_enable = hsw_vebox_irq_enable;
2808                 engine->irq_disable = hsw_vebox_irq_disable;
2809         }
2810
2811         return intel_init_ring_buffer(engine);
2812 }