2 * Copyright © 2008-2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include <drm/i915_drm.h>
29 * DOC: fence register handling
 * Important to avoid confusion: "fences" in the i915 driver are not execution
32 * fences used to track command completion but hardware detiler objects which
33 * wrap a given range of the global GTT. Each platform has only a fairly limited
34 * set of these objects.
36 * Fences are used to detile GTT memory mappings. They're also connected to the
37 * hardware frontbuffer render tracking and hence interact with frontbuffer
38 * compression. Furthermore on older platforms fences are required for tiled
39 * objects used by the display engine. They can also be used by the render
40 * engine - they're required for blitter commands and are optional for render
41 * commands. But on gen4+ both display (with the exception of fbc) and rendering
42 * have their own tiling state bits and don't need fences.
44 * Also note that fences only support X and Y tiling and hence can't be used for
45 * the fancier new tiling formats like W, Ys and Yf.
47 * Finally note that because fences are such a restricted resource they're
48 * dynamically associated with objects. Furthermore fence state is committed to
49 * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
 * explicitly call i915_vma_get_fence() to synchronize fencing status
 * for CPU access. Also note that some code wants an unfenced view; for those
 * cases the fence can be removed forcefully with i915_vma_put_fence().
54 * Internally these functions will synchronize with userspace access by removing
55 * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
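 *
 * As a rough usage sketch (illustrative only; locking, pinning and error
 * handling are elided): code that wants coherent CPU access through a tiled
 * GTT mmap acquires a fence first, and code that needs a linear, unfenced
 * view drops it again:
 *
 *	ret = i915_vma_get_fence(vma);
 *	if (ret)
 *		return ret;
 *	... access the object through the fenced GTT mapping ...
 *	ret = i915_vma_put_fence(vma);
 */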
60 static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
63 i915_reg_t fence_reg_lo, fence_reg_hi;
64 int fence_pitch_shift;
67 if (INTEL_INFO(fence->i915)->gen >= 6) {
68 fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
69 fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
	} else {
		fence_reg_lo = FENCE_REG_965_LO(fence->id);
74 fence_reg_hi = FENCE_REG_965_HI(fence->id);
75 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
80 unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
81 bool is_y_tiled = tiling == I915_TILING_Y;
82 unsigned int stride = i915_gem_object_get_stride(vma->obj);
83 u32 row_size = stride * (is_y_tiled ? 32 : 8);
84 u32 size = rounddown((u32)vma->node.size, row_size);
86 val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
87 val |= vma->node.start & 0xfffff000;
88 val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
		if (is_y_tiled)
			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
91 val |= I965_FENCE_REG_VALID;
95 struct drm_i915_private *dev_priv = fence->i915;
97 /* To w/a incoherency with non-atomic 64-bit register updates,
98 * we split the 64-bit update into two 32-bit writes. In order
99 * for a partial fence not to be evaluated between writes, we
100 * precede the update with write to turn off the fence register,
101 * and only enable the fence as the last step.
103 * For extra levels of paranoia, we make sure each step lands
104 * before applying the next step.
106 I915_WRITE(fence_reg_lo, 0);
107 POSTING_READ(fence_reg_lo);
109 I915_WRITE(fence_reg_hi, upper_32_bits(val));
110 I915_WRITE(fence_reg_lo, lower_32_bits(val));
111 POSTING_READ(fence_reg_lo);
115 static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
116 struct i915_vma *vma)
122 unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
123 bool is_y_tiled = tiling == I915_TILING_Y;
124 unsigned int stride = i915_gem_object_get_stride(vma->obj);
128 WARN((vma->node.start & ~I915_FENCE_START_MASK) ||
129 !is_power_of_2(vma->node.size) ||
130 (vma->node.start & (vma->node.size - 1)),
131 "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08llx) aligned\n",
133 i915_vma_is_map_and_fenceable(vma),
	if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
		tile_width = 128;
	else
		tile_width = 512;
141 /* Note: pitch better be a power of two tile widths */
142 pitch_val = stride / tile_width;
143 pitch_val = ffs(pitch_val) - 1;
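	/*
	 * Worked example: with 512 byte wide X tiles and a 2048 byte
	 * stride, pitch_val = 2048 / 512 = 4 tile widths, which is
	 * encoded as ffs(4) - 1 = 2 in the fence register.
	 */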
145 val = vma->node.start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
148 val |= I915_FENCE_SIZE_BITS(vma->node.size);
149 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
150 val |= I830_FENCE_REG_VALID;
154 struct drm_i915_private *dev_priv = fence->i915;
155 i915_reg_t reg = FENCE_REG(fence->id);
157 I915_WRITE(reg, val);
162 static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
163 struct i915_vma *vma)
169 unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
170 bool is_y_tiled = tiling == I915_TILING_Y;
171 unsigned int stride = i915_gem_object_get_stride(vma->obj);
174 WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
175 !is_power_of_2(vma->node.size) ||
176 (vma->node.start & (vma->node.size - 1)),
177 "object 0x%08llx not 512K or pot-size 0x%08llx aligned\n",
178 vma->node.start, vma->node.size);
180 pitch_val = stride / 128;
181 pitch_val = ffs(pitch_val) - 1;
183 val = vma->node.start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
186 val |= I830_FENCE_SIZE_BITS(vma->node.size);
187 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
188 val |= I830_FENCE_REG_VALID;
192 struct drm_i915_private *dev_priv = fence->i915;
193 i915_reg_t reg = FENCE_REG(fence->id);
195 I915_WRITE(reg, val);
200 static void fence_write(struct drm_i915_fence_reg *fence,
201 struct i915_vma *vma)
203 /* Previous access through the fence register is marshalled by
 * the mb() inside the fault handlers (i915_gem_release_mmap())
205 * and explicitly managed for internal users.
208 if (IS_GEN2(fence->i915))
209 i830_write_fence_reg(fence, vma);
210 else if (IS_GEN3(fence->i915))
211 i915_write_fence_reg(fence, vma);
213 i965_write_fence_reg(fence, vma);
215 /* Access through the fenced region afterwards is
216 * ordered by the posting reads whilst writing the registers.
219 fence->dirty = false;
222 static int fence_update(struct drm_i915_fence_reg *fence,
223 struct i915_vma *vma)
228 if (!i915_vma_is_map_and_fenceable(vma))
231 if (WARN(!i915_gem_object_get_stride(vma->obj) ||
232 !i915_gem_object_get_tiling(vma->obj),
233 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
234 i915_gem_object_get_stride(vma->obj),
235 i915_gem_object_get_tiling(vma->obj)))
238 ret = i915_gem_active_retire(&vma->last_fence,
239 &vma->obj->base.dev->struct_mutex);
245 ret = i915_gem_active_retire(&fence->vma->last_fence,
246 &fence->vma->obj->base.dev->struct_mutex);
251 if (fence->vma && fence->vma != vma) {
252 /* Ensure that all userspace CPU access is completed before
253 * stealing the fence.
255 i915_gem_release_mmap(fence->vma->obj);
257 fence->vma->fence = NULL;
260 list_move(&fence->link, &fence->i915->mm.fence_list);
263 fence_write(fence, vma);
266 if (fence->vma != vma) {
271 list_move_tail(&fence->link, &fence->i915->mm.fence_list);
278 * i915_vma_put_fence - force-remove fence for a VMA
279 * @vma: vma to map linearly (not through a fence reg)
281 * This function force-removes any fence from the given object, which is useful
282 * if the kernel wants to do untiled GTT access.
286 * 0 on success, negative error code on failure.
289 i915_vma_put_fence(struct i915_vma *vma)
291 struct drm_i915_fence_reg *fence = vma->fence;
293 assert_rpm_wakelock_held(to_i915(vma->vm->dev));
298 if (fence->pin_count)
301 return fence_update(fence, NULL);
304 static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
306 struct drm_i915_fence_reg *fence;
308 list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
309 if (fence->pin_count)
315 /* Wait for completion of pending flips which consume fences */
316 if (intel_has_pending_fb_unpin(&dev_priv->drm))
317 return ERR_PTR(-EAGAIN);
319 return ERR_PTR(-EDEADLK);
323 * i915_vma_get_fence - set up fencing for a vma
324 * @vma: vma to map through a fence reg
326 * When mapping objects through the GTT, userspace wants to be able to write
327 * to them without having to worry about swizzling if the object is tiled.
328 * This function walks the fence regs looking for a free one for @obj,
329 * stealing one if it can't find any.
331 * It then sets up the reg based on the object's properties: address, pitch
334 * For an untiled surface, this removes any existing fence.
338 * 0 on success, negative error code on failure.
341 i915_vma_get_fence(struct i915_vma *vma)
343 struct drm_i915_fence_reg *fence;
344 struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
346 assert_rpm_wakelock_held(to_i915(vma->vm->dev));
348 /* Just update our place in the LRU if our fence is getting reused. */
352 list_move_tail(&fence->link,
353 &fence->i915->mm.fence_list);
357 fence = fence_find(to_i915(vma->vm->dev));
359 return PTR_ERR(fence);
363 return fence_update(fence, set);
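
/*
 * Illustrative caller pattern (sketch only, error handling elided): a user
 * that must keep the fence while the GPU uses the mapping bumps the fence's
 * pin_count so that fence_find() cannot steal the register from under it:
 *
 *	ret = i915_vma_get_fence(vma);
 *	if (ret == 0 && vma->fence)
 *		vma->fence->pin_count++;
 *	... submit work that uses the fenced mapping ...
 *	if (vma->fence)
 *		vma->fence->pin_count--;
 */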
367 * i915_gem_restore_fences - restore fence state
370 * Restore the hw fence state to match the software tracking again, to be called
371 * after a gpu reset and on resume.
373 void i915_gem_restore_fences(struct drm_device *dev)
375 struct drm_i915_private *dev_priv = to_i915(dev);
378 /* Note that this may be called outside of struct_mutex, by
379 * runtime suspend/resume. The barrier we require is enforced by
380 * rpm itself - all access to fences/GTT are only within an rpm
381 * wakeref, and to acquire that wakeref you must pass through here.
384 for (i = 0; i < dev_priv->num_fence_regs; i++) {
385 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
386 struct i915_vma *vma = reg->vma;
389 * Commit delayed tiling changes if we have an object still
390 * attached to the fence, otherwise just clear the fence.
392 if (vma && !i915_gem_object_is_tiled(vma->obj)) {
393 GEM_BUG_ON(!reg->dirty);
394 GEM_BUG_ON(vma->obj->fault_mappable);
			list_move(&reg->link, &dev_priv->mm.fence_list);
401 fence_write(reg, vma);
407 * DOC: tiling swizzling details
409 * The idea behind tiling is to increase cache hit rates by rearranging
410 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvement from doing this on the back/depth buffer is on
414 * Intel architectures make this somewhat more complicated, though, by
415 * adjustments made to addressing of data when the memory is in interleaved
416 * mode (matched pairs of DIMMS) to improve memory bandwidth.
417 * For interleaved memory, the CPU sends every sequential 64 bytes
418 * to an alternate memory channel so it can get the bandwidth from both.
420 * The GPU also rearranges its accesses for increased bandwidth to interleaved
421 * memory, and it matches what the CPU does for non-tiled. However, when tiled
422 * it does it a little differently, since one walks addresses not just in the
423 * X direction but also Y. So, along with alternating channels when bit
424 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
425 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
426 * are common to both the 915 and 965-class hardware.
428 * The CPU also sometimes XORs in higher bits as well, to improve
429 * bandwidth doing strided access like we do so frequently in graphics. This
430 * is called "Channel XOR Randomization" in the MCH documentation. The result
431 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
434 * All of this bit 6 XORing has an effect on our memory management,
435 * as we need to make sure that the 3d driver can correctly address object
438 * If we don't have interleaved memory, all tiling is safe and no swizzling is
441 * When bit 17 is XORed in, we simply refuse to tile at all. Bit
442 * 17 is not just a page offset, so as we page an object out and back in,
443 * individual pages in it will have different bit 17 addresses, resulting in
444 * each 64 bytes being swapped with its neighbor!
446 * Otherwise, if interleaved, we have to tell the 3d driver what the address
447 * swizzling it needs to do is, since it's writing with the CPU to the pages
448 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
449 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
450 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
451 * to match what the GPU expects.
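 *
 * As a concrete (illustrative) example, for I915_BIT_6_SWIZZLE_9_10 a CPU
 * copy has to fold bits 9 and 10 of the linear offset into bit 6 before
 * touching the bytes, along the lines of:
 *
 *	static u32 swizzle_offset_9_10(u32 offset)
 *	{
 *		return offset ^ (((offset >> 3) ^ (offset >> 4)) & 64);
 *	}
 *
 * (bit 9 shifted right by 3 and bit 10 shifted right by 4 both land on
 * bit 6). The other swizzle modes fold in bit 11 or bit 17 in the same
 * way; the helper name here is purely hypothetical.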
455 * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
458 * Detects bit 6 swizzling of address lookup between IGD access and CPU
459 * access through main memory.
462 i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
464 struct drm_i915_private *dev_priv = to_i915(dev);
465 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
466 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
468 if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
470 * On BDW+, swizzling is not used. We leave the CPU memory
471 * controller in charge of optimizing memory accesses without
472 * the extra address manipulation GPU side.
474 * VLV and CHV don't have GPU swizzling.
476 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
477 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
478 } else if (INTEL_INFO(dev)->gen >= 6) {
479 if (dev_priv->preserve_bios_swizzle) {
480 if (I915_READ(DISP_ARB_CTL) &
481 DISP_TILE_SURFACE_SWIZZLING) {
482 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
483 swizzle_y = I915_BIT_6_SWIZZLE_9;
485 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
486 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
489 uint32_t dimm_c0, dimm_c1;
490 dimm_c0 = I915_READ(MAD_DIMM_C0);
491 dimm_c1 = I915_READ(MAD_DIMM_C1);
492 dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
493 dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
494 /* Enable swizzling when the channels are populated
495 * with identically sized dimms. We don't need to check
496 * the 3rd channel because no cpu with gpu attached
497 * ships in that configuration. Also, swizzling only
498 * makes sense for 2 channels anyway. */
499 if (dimm_c0 == dimm_c1) {
500 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
501 swizzle_y = I915_BIT_6_SWIZZLE_9;
503 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
504 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
507 } else if (IS_GEN5(dev)) {
		/* On Ironlake, whatever the DRAM config, the GPU always does
		 * the same swizzling setup.
511 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
512 swizzle_y = I915_BIT_6_SWIZZLE_9;
513 } else if (IS_GEN2(dev)) {
514 /* As far as we know, the 865 doesn't have these bit 6
517 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
518 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
519 } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
522 /* On 9xx chipsets, channel interleave by the CPU is
523 * determined by DCC. For single-channel, neither the CPU
		 * nor the GPU does swizzling. For dual-channel interleaved,
		 * the GPU's interleave is bits 9 and 10 for X tiled, and bit
526 * 9 for Y tiled. The CPU's interleave is independent, and
527 * can be based on either bit 11 (haven't seen this yet) or
530 dcc = I915_READ(DCC);
531 switch (dcc & DCC_ADDRESSING_MODE_MASK) {
532 case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
533 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
534 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
535 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
537 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
538 if (dcc & DCC_CHANNEL_XOR_DISABLE) {
539 /* This is the base swizzling by the GPU for
542 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
543 swizzle_y = I915_BIT_6_SWIZZLE_9;
544 } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
545 /* Bit 11 swizzling by the CPU in addition. */
546 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
547 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
549 /* Bit 17 swizzling by the CPU in addition. */
550 swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
551 swizzle_y = I915_BIT_6_SWIZZLE_9_17;
556 /* check for L-shaped memory aka modified enhanced addressing */
558 !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
559 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
560 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
563 if (dcc == 0xffffffff) {
564 DRM_ERROR("Couldn't read from MCHBAR. "
565 "Disabling tiling.\n");
566 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
567 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
570 /* The 965, G33, and newer, have a very flexible memory
571 * configuration. It will enable dual-channel mode
572 * (interleaving) on as much memory as it can, and the GPU
573 * will additionally sometimes enable different bit 6
574 * swizzling for tiled objects from the CPU.
576 * Here's what I found on the G965:
	 *    slot fill         memory size  swizzling
	 * 0A   0B   1A   1B   1-ch   2-ch
	 * 512  0    512  0    16     1008   X
	 * 512  0    0    512  16     1008   X
	 * 0    512  0    512  16     1008   X
	 * 1024 1024 1024 0    2048   1024   O
585 * We could probably detect this based on either the DRB
586 * matching, which was the case for the swizzling required in
587 * the table above, or from the 1-ch value being less than
588 * the minimum size of a rank.
590 * Reports indicate that the swizzling actually
591 * varies depending upon page placement inside the
592 * channels, i.e. we see swizzled pages where the
593 * banks of memory are paired and unswizzled on the
594 * uneven portion, so leave that as unknown.
596 if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
597 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
598 swizzle_y = I915_BIT_6_SWIZZLE_9;
602 if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
603 swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
604 /* Userspace likes to explode if it sees unknown swizzling,
605 * so lie. We will finish the lie when reporting through
606 * the get-tiling-ioctl by reporting the physical swizzle
607 * mode as unknown instead.
609 * As we don't strictly know what the swizzling is, it may be
610 * bit17 dependent, and so we need to also prevent the pages
613 dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
614 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
615 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
618 dev_priv->mm.bit_6_swizzle_x = swizzle_x;
619 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
623 * Swap every 64 bytes of this page around, to account for it having a new
624 * bit 17 of its physical address and therefore being interpreted differently
i915_gem_swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

	for (i = 0; i < PAGE_SIZE; i += 128) {
637 memcpy(temp, &vaddr[i], 64);
638 memcpy(&vaddr[i], &vaddr[i + 64], 64);
639 memcpy(&vaddr[i + 64], temp, 64);
646 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
647 * @obj: i915 GEM buffer object
649 * This function fixes up the swizzling in case any page frame number for this
650 * object has changed in bit 17 since that state has been saved with
651 * i915_gem_object_save_bit_17_swizzle().
653 * This is called when pinning backing storage again, since the kernel is free
654 * to move unpinned backing storage around (either by directly moving pages or
655 * by swapping them out and back in again).
658 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
660 struct sgt_iter sgt_iter;
664 if (obj->bit_17 == NULL)
668 for_each_sgt_page(page, sgt_iter, obj->pages) {
669 char new_bit_17 = page_to_phys(page) >> 17;
670 if ((new_bit_17 & 0x1) !=
671 (test_bit(i, obj->bit_17) != 0)) {
672 i915_gem_swizzle_page(page);
673 set_page_dirty(page);
680 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
681 * @obj: i915 GEM buffer object
683 * This function saves the bit 17 of each page frame number so that swizzling
684 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
685 * be called before the backing storage can be unpinned.
688 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
690 struct sgt_iter sgt_iter;
692 int page_count = obj->base.size >> PAGE_SHIFT;
695 if (obj->bit_17 == NULL) {
696 obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
697 sizeof(long), GFP_KERNEL);
698 if (obj->bit_17 == NULL) {
699 DRM_ERROR("Failed to allocate memory for bit 17 "
707 for_each_sgt_page(page, sgt_iter, obj->pages) {
708 if (page_to_phys(page) & (1 << 17))
709 __set_bit(i, obj->bit_17);
711 __clear_bit(i, obj->bit_17);
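
/*
 * Pairing sketch (illustrative, following the kernel-doc above): callers
 * that let the backing storage move wrap the unpin/pin cycle like
 *
 *	i915_gem_object_save_bit_17_swizzle(obj);
 *	... release the pages, possibly swap them out and back in ...
 *	i915_gem_object_do_bit_17_swizzle(obj);
 *
 * so that any page whose physical address changed in bit 17 gets its
 * 64-byte halves swapped back by i915_gem_swizzle_page().
 */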