4 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 * Zhi Wang <zhi.a.wang@intel.com>
27 * Zhenyu Wang <zhenyuw@linux.intel.com>
28 * Xiao Zheng <xiao.zheng@intel.com>
31 * Min He <min.he@intel.com>
32 * Bing Niu <bing.niu@intel.com>
38 #include "i915_pvinfo.h"
#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif
47 static bool enable_out_of_sync = false;
48 static int preallocated_oos_pages = 8192;
51 * validate a gm address and related range size,
52 * translate it to host gm address
54 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
57 return vgpu_gmadr_is_valid(vgpu, addr);
59 if (vgpu_gmadr_is_aperture(vgpu, addr) &&
60 vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
62 else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
63 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
66 gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
71 /* translate a guest gmadr to host gmadr */
72 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
74 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
76 if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
77 "invalid guest gmadr %llx\n", g_addr))
80 if (vgpu_gmadr_is_aperture(vgpu, g_addr))
81 *h_addr = vgpu_aperture_gmadr_base(vgpu)
82 + (g_addr - vgpu_aperture_offset(vgpu));
84 *h_addr = vgpu_hidden_gmadr_base(vgpu)
85 + (g_addr - vgpu_hidden_offset(vgpu));
89 /* translate a host gmadr to guest gmadr */
90 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
92 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
94 if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
95 "invalid host gmadr %llx\n", h_addr))
98 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
99 *g_addr = vgpu_aperture_gmadr_base(vgpu)
100 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
102 *g_addr = vgpu_hidden_gmadr_base(vgpu)
103 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
107 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
108 unsigned long *h_index)
113 ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
118 *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
122 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
123 unsigned long *g_index)
128 ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
133 *g_index = g_addr >> I915_GTT_PAGE_SHIFT;
137 #define gtt_type_is_entry(type) \
138 (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
139 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
140 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
142 #define gtt_type_is_pt(type) \
143 (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
145 #define gtt_type_is_pte_pt(type) \
146 (type == GTT_TYPE_PPGTT_PTE_PT)
148 #define gtt_type_is_root_pointer(type) \
149 (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
151 #define gtt_init_entry(e, t, p, v) do { \
154 memcpy(&(e)->val64, &v, sizeof(v)); \
158 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
160 * - type of next level page table
161 * - type of entry inside this level page table
162 * - type of entry with PSE set
 * If the given type doesn't carry a particular piece of information,
 * GTT_TYPE_INVALID is returned instead. For example, an L4 root entry has
 * no PSE bit, so asking for its PSE type returns GTT_TYPE_INVALID; likewise
 * a PTE page table has no next-level page table, so asking for its next
 * level type also returns GTT_TYPE_INVALID. This is useful when traversing
 * a page table.
 */
173 struct gtt_type_table_entry {
180 #define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
182 .entry_type = e_type, \
183 .pt_type = cpt_type, \
184 .next_pt_type = npt_type, \
185 .pse_entry_type = pse_type, \
188 static struct gtt_type_table_entry gtt_type_table[] = {
189 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
190 GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
192 GTT_TYPE_PPGTT_PML4_PT,
194 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
195 GTT_TYPE_PPGTT_PML4_ENTRY,
196 GTT_TYPE_PPGTT_PML4_PT,
197 GTT_TYPE_PPGTT_PDP_PT,
199 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
200 GTT_TYPE_PPGTT_PML4_ENTRY,
201 GTT_TYPE_PPGTT_PML4_PT,
202 GTT_TYPE_PPGTT_PDP_PT,
204 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
205 GTT_TYPE_PPGTT_PDP_ENTRY,
206 GTT_TYPE_PPGTT_PDP_PT,
207 GTT_TYPE_PPGTT_PDE_PT,
208 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
209 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
210 GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
212 GTT_TYPE_PPGTT_PDE_PT,
213 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
214 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
215 GTT_TYPE_PPGTT_PDP_ENTRY,
216 GTT_TYPE_PPGTT_PDP_PT,
217 GTT_TYPE_PPGTT_PDE_PT,
218 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
219 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
220 GTT_TYPE_PPGTT_PDE_ENTRY,
221 GTT_TYPE_PPGTT_PDE_PT,
222 GTT_TYPE_PPGTT_PTE_PT,
223 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
224 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
225 GTT_TYPE_PPGTT_PDE_ENTRY,
226 GTT_TYPE_PPGTT_PDE_PT,
227 GTT_TYPE_PPGTT_PTE_PT,
228 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
229 /* We take IPS bit as 'PSE' for PTE level. */
230 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
231 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
232 GTT_TYPE_PPGTT_PTE_PT,
234 GTT_TYPE_PPGTT_PTE_64K_ENTRY),
235 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
236 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
237 GTT_TYPE_PPGTT_PTE_PT,
239 GTT_TYPE_PPGTT_PTE_64K_ENTRY),
240 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
241 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
242 GTT_TYPE_PPGTT_PTE_PT,
244 GTT_TYPE_PPGTT_PTE_64K_ENTRY),
245 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
246 GTT_TYPE_PPGTT_PDE_ENTRY,
247 GTT_TYPE_PPGTT_PDE_PT,
249 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
250 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
251 GTT_TYPE_PPGTT_PDP_ENTRY,
252 GTT_TYPE_PPGTT_PDP_PT,
254 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
255 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
262 static inline int get_next_pt_type(int type)
264 return gtt_type_table[type].next_pt_type;
267 static inline int get_pt_type(int type)
269 return gtt_type_table[type].pt_type;
272 static inline int get_entry_type(int type)
274 return gtt_type_table[type].entry_type;
277 static inline int get_pse_type(int type)
279 return gtt_type_table[type].pse_entry_type;
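/*
 * Illustrative sketch only, not part of the driver (the helper name is made
 * up for documentation purposes): shows how the type table above resolves a
 * PDE entry using the accessors just defined. With PSE set the entry maps a
 * 2M page directly, otherwise it points at a PTE page table.
 */
static inline int example_resolve_pde_type(bool pse)
{
	if (pse)
		return get_pse_type(GTT_TYPE_PPGTT_PDE_ENTRY);	/* 2M leaf entry */

	return get_next_pt_type(GTT_TYPE_PPGTT_PDE_ENTRY);	/* PTE page table */
}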
282 static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
284 void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
289 static void ggtt_invalidate(struct intel_gt *gt)
291 mmio_hw_access_pre(gt);
292 intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
293 mmio_hw_access_post(gt);
296 static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
298 void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
303 static inline int gtt_get_entry64(void *pt,
304 struct intel_gvt_gtt_entry *e,
305 unsigned long index, bool hypervisor_access, unsigned long gpa,
306 struct intel_vgpu *vgpu)
308 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
311 if (WARN_ON(info->gtt_entry_size != 8))
314 if (hypervisor_access) {
315 ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
316 (index << info->gtt_entry_size_shift),
321 e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
323 e->val64 = *((u64 *)pt + index);
328 static inline int gtt_set_entry64(void *pt,
329 struct intel_gvt_gtt_entry *e,
330 unsigned long index, bool hypervisor_access, unsigned long gpa,
331 struct intel_vgpu *vgpu)
333 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
336 if (WARN_ON(info->gtt_entry_size != 8))
339 if (hypervisor_access) {
340 ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
341 (index << info->gtt_entry_size_shift),
346 write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
348 *((u64 *)pt + index) = e->val64;
355 #define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30)
356 #define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
357 #define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
358 #define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12)
360 #define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
363 #define GTT_64K_PTE_STRIDE 16
365 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
369 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
370 pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
371 else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
372 pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
373 else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
374 pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
376 pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
380 static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
382 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
383 e->val64 &= ~ADDR_1G_MASK;
384 pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
385 } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
386 e->val64 &= ~ADDR_2M_MASK;
387 pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
388 } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
389 e->val64 &= ~ADDR_64K_MASK;
390 pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
392 e->val64 &= ~ADDR_4K_MASK;
393 pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
396 e->val64 |= (pfn << PAGE_SHIFT);
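/*
 * Illustrative note (derived from the masks above): masking the pfn with
 * ADDR_2M_MASK >> PAGE_SHIFT clears its low 9 bits, so a 2M entry stores a
 * 512-page (2MB) aligned frame number; similarly a 1G entry keeps a
 * 2^18-page (1GB) aligned frame and a 64K entry a 16-page (64KB) aligned one.
 */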
399 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
401 return !!(e->val64 & _PAGE_PSE);
404 static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
406 if (gen8_gtt_test_pse(e)) {
408 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
409 e->val64 &= ~_PAGE_PSE;
410 e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
412 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
413 e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
414 e->val64 &= ~_PAGE_PSE;
422 static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
424 if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
427 return !!(e->val64 & GEN8_PDE_IPS_64K);
430 static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
432 if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
435 e->val64 &= ~GEN8_PDE_IPS_64K;
438 static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
	/*
	 * i915 writes the PDP root pointer registers without the present bit
	 * set, and that still works, so root pointer entries need to be
	 * treated specially here.
	 */
445 if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
446 || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
447 return (e->val64 != 0);
449 return (e->val64 & _PAGE_PRESENT);
452 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
454 e->val64 &= ~_PAGE_PRESENT;
457 static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
459 e->val64 |= _PAGE_PRESENT;
462 static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
464 return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
467 static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
469 e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
472 static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
474 e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
478 * Per-platform GMA routines.
480 static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
482 unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
484 trace_gma_index(__func__, gma, x);
488 #define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
489 static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
491 unsigned long x = (exp); \
492 trace_gma_index(__func__, gma, x); \
496 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
497 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
498 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
499 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
500 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
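/*
 * Worked example (illustrative): with the shift/mask pairs above, a 4-level
 * gma decomposes as bits [47:39] -> pml4 index, [38:30] -> l4_pdp index,
 * [29:21] -> pde index and [20:12] -> pte index. E.g. gma 0x100403000 gives
 * pml4 index 0, l4_pdp index 4, pde index 2 and pte index 3.
 */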
502 static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
503 .get_entry = gtt_get_entry64,
504 .set_entry = gtt_set_entry64,
505 .clear_present = gtt_entry_clear_present,
506 .set_present = gtt_entry_set_present,
507 .test_present = gen8_gtt_test_present,
508 .test_pse = gen8_gtt_test_pse,
509 .clear_pse = gen8_gtt_clear_pse,
510 .clear_ips = gen8_gtt_clear_ips,
511 .test_ips = gen8_gtt_test_ips,
512 .clear_64k_splited = gen8_gtt_clear_64k_splited,
513 .set_64k_splited = gen8_gtt_set_64k_splited,
514 .test_64k_splited = gen8_gtt_test_64k_splited,
515 .get_pfn = gen8_gtt_get_pfn,
516 .set_pfn = gen8_gtt_set_pfn,
519 static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
520 .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
521 .gma_to_pte_index = gen8_gma_to_pte_index,
522 .gma_to_pde_index = gen8_gma_to_pde_index,
523 .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
524 .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
525 .gma_to_pml4_index = gen8_gma_to_pml4_index,
528 /* Update entry type per pse and ips bit. */
529 static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
530 struct intel_gvt_gtt_entry *entry, bool ips)
532 switch (entry->type) {
533 case GTT_TYPE_PPGTT_PDE_ENTRY:
534 case GTT_TYPE_PPGTT_PDP_ENTRY:
535 if (pte_ops->test_pse(entry))
536 entry->type = get_pse_type(entry->type);
538 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
540 entry->type = get_pse_type(entry->type);
543 GEM_BUG_ON(!gtt_type_is_entry(entry->type));
546 GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
552 static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
553 struct intel_gvt_gtt_entry *entry, unsigned long index,
556 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
558 GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
560 entry->type = mm->ppgtt_mm.root_entry_type;
561 pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
562 mm->ppgtt_mm.shadow_pdps,
563 entry, index, false, 0, mm->vgpu);
564 update_entry_type_for_real(pte_ops, entry, false);
567 static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
568 struct intel_gvt_gtt_entry *entry, unsigned long index)
570 _ppgtt_get_root_entry(mm, entry, index, true);
573 static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
574 struct intel_gvt_gtt_entry *entry, unsigned long index)
576 _ppgtt_get_root_entry(mm, entry, index, false);
579 static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
580 struct intel_gvt_gtt_entry *entry, unsigned long index,
583 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
585 pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
586 mm->ppgtt_mm.shadow_pdps,
587 entry, index, false, 0, mm->vgpu);
590 static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
591 struct intel_gvt_gtt_entry *entry, unsigned long index)
593 _ppgtt_set_root_entry(mm, entry, index, true);
596 static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
597 struct intel_gvt_gtt_entry *entry, unsigned long index)
599 _ppgtt_set_root_entry(mm, entry, index, false);
602 static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
603 struct intel_gvt_gtt_entry *entry, unsigned long index)
605 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
607 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
609 entry->type = GTT_TYPE_GGTT_PTE;
610 pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
614 static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
615 struct intel_gvt_gtt_entry *entry, unsigned long index)
617 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
619 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
621 pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
625 static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
626 struct intel_gvt_gtt_entry *entry, unsigned long index)
628 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
630 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
632 pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
635 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
636 struct intel_gvt_gtt_entry *entry, unsigned long index)
638 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
639 unsigned long offset = index;
641 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
643 if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
644 offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
645 mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;
646 } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
647 offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
648 mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;
651 pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
655 * PPGTT shadow page table helpers.
657 static inline int ppgtt_spt_get_entry(
658 struct intel_vgpu_ppgtt_spt *spt,
659 void *page_table, int type,
660 struct intel_gvt_gtt_entry *e, unsigned long index,
663 struct intel_gvt *gvt = spt->vgpu->gvt;
664 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
667 e->type = get_entry_type(type);
669 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
672 ret = ops->get_entry(page_table, e, index, guest,
673 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
678 update_entry_type_for_real(ops, e, guest ?
679 spt->guest_page.pde_ips : false);
681 gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
682 type, e->type, index, e->val64);
686 static inline int ppgtt_spt_set_entry(
687 struct intel_vgpu_ppgtt_spt *spt,
688 void *page_table, int type,
689 struct intel_gvt_gtt_entry *e, unsigned long index,
692 struct intel_gvt *gvt = spt->vgpu->gvt;
693 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
695 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
698 gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
699 type, e->type, index, e->val64);
701 return ops->set_entry(page_table, e, index, guest,
702 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
706 #define ppgtt_get_guest_entry(spt, e, index) \
707 ppgtt_spt_get_entry(spt, NULL, \
708 spt->guest_page.type, e, index, true)
710 #define ppgtt_set_guest_entry(spt, e, index) \
711 ppgtt_spt_set_entry(spt, NULL, \
712 spt->guest_page.type, e, index, true)
714 #define ppgtt_get_shadow_entry(spt, e, index) \
715 ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
716 spt->shadow_page.type, e, index, false)
718 #define ppgtt_set_shadow_entry(spt, e, index) \
719 ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
720 spt->shadow_page.type, e, index, false)
722 static void *alloc_spt(gfp_t gfp_mask)
724 struct intel_vgpu_ppgtt_spt *spt;
726 spt = kzalloc(sizeof(*spt), gfp_mask);
730 spt->shadow_page.page = alloc_page(gfp_mask);
731 if (!spt->shadow_page.page) {
738 static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
740 __free_page(spt->shadow_page.page);
744 static int detach_oos_page(struct intel_vgpu *vgpu,
745 struct intel_vgpu_oos_page *oos_page);
747 static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
749 struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
751 trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
753 dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
754 PCI_DMA_BIDIRECTIONAL);
756 radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
758 if (spt->guest_page.gfn) {
759 if (spt->guest_page.oos_page)
760 detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
762 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
765 list_del_init(&spt->post_shadow_list);
769 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
771 struct intel_vgpu_ppgtt_spt *spt, *spn;
772 struct radix_tree_iter iter;
777 radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
778 spt = radix_tree_deref_slot(slot);
779 list_move(&spt->post_shadow_list, &all_spt);
783 list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
787 static int ppgtt_handle_guest_write_page_table_bytes(
788 struct intel_vgpu_ppgtt_spt *spt,
789 u64 pa, void *p_data, int bytes);
791 static int ppgtt_write_protection_handler(
792 struct intel_vgpu_page_track *page_track,
793 u64 gpa, void *data, int bytes)
795 struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
799 if (bytes != 4 && bytes != 8)
802 ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
808 /* Find a spt by guest gfn. */
809 static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
810 struct intel_vgpu *vgpu, unsigned long gfn)
812 struct intel_vgpu_page_track *track;
814 track = intel_vgpu_find_page_track(vgpu, gfn);
815 if (track && track->handler == ppgtt_write_protection_handler)
816 return track->priv_data;
821 /* Find the spt by shadow page mfn. */
822 static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
823 struct intel_vgpu *vgpu, unsigned long mfn)
825 return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
828 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
830 /* Allocate shadow page table without guest page. */
831 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
832 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
834 struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
835 struct intel_vgpu_ppgtt_spt *spt = NULL;
840 spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
842 if (reclaim_one_ppgtt_mm(vgpu->gvt))
845 gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
846 return ERR_PTR(-ENOMEM);
850 atomic_set(&spt->refcount, 1);
851 INIT_LIST_HEAD(&spt->post_shadow_list);
856 spt->shadow_page.type = type;
857 daddr = dma_map_page(kdev, spt->shadow_page.page,
858 0, 4096, PCI_DMA_BIDIRECTIONAL);
859 if (dma_mapping_error(kdev, daddr)) {
860 gvt_vgpu_err("fail to map dma addr\n");
864 spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
865 spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
867 ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
874 dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
880 /* Allocate shadow page table associated with specific gfn. */
881 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
882 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
883 unsigned long gfn, bool guest_pde_ips)
885 struct intel_vgpu_ppgtt_spt *spt;
888 spt = ppgtt_alloc_spt(vgpu, type);
895 ret = intel_vgpu_register_page_track(vgpu, gfn,
896 ppgtt_write_protection_handler, spt);
902 spt->guest_page.type = type;
903 spt->guest_page.gfn = gfn;
904 spt->guest_page.pde_ips = guest_pde_ips;
906 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
911 #define pt_entry_size_shift(spt) \
912 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
914 #define pt_entries(spt) \
915 (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
917 #define for_each_present_guest_entry(spt, e, i) \
918 for (i = 0; i < pt_entries(spt); \
919 i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
920 if (!ppgtt_get_guest_entry(spt, e, i) && \
921 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
923 #define for_each_present_shadow_entry(spt, e, i) \
924 for (i = 0; i < pt_entries(spt); \
925 i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
926 if (!ppgtt_get_shadow_entry(spt, e, i) && \
927 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
929 #define for_each_shadow_entry(spt, e, i) \
930 for (i = 0; i < pt_entries(spt); \
931 i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
932 if (!ppgtt_get_shadow_entry(spt, e, i))
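/*
 * Illustrative note: with the 8-byte gen8 entry size, pt_entries() is
 * I915_GTT_PAGE_SIZE >> 3 = 512 slots per page table. When pde_ips is set
 * the table backs 64K pages, so the iterators above step by
 * GTT_64K_PTE_STRIDE and visit only indices 0, 16, 32, ..., 496
 * (32 entries) instead of all 512.
 */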
934 static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
936 int v = atomic_read(&spt->refcount);
938 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
939 atomic_inc(&spt->refcount);
942 static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
944 int v = atomic_read(&spt->refcount);
946 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
947 return atomic_dec_return(&spt->refcount);
950 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
952 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
953 struct intel_gvt_gtt_entry *e)
955 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
956 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
957 struct intel_vgpu_ppgtt_spt *s;
958 enum intel_gvt_gtt_type cur_pt_type;
960 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
962 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
963 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
964 cur_pt_type = get_next_pt_type(e->type);
966 if (!gtt_type_is_pt(cur_pt_type) ||
967 !gtt_type_is_pt(cur_pt_type + 1)) {
968 drm_WARN(&i915->drm, 1,
969 "Invalid page table type, cur_pt_type is: %d\n",
976 if (ops->get_pfn(e) ==
977 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
980 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
982 gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
986 return ppgtt_invalidate_spt(s);
989 static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
990 struct intel_gvt_gtt_entry *entry)
992 struct intel_vgpu *vgpu = spt->vgpu;
993 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
997 pfn = ops->get_pfn(entry);
998 type = spt->shadow_page.type;
1000 /* Uninitialized spte or unshadowed spte. */
1001 if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
1004 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
1007 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
1009 struct intel_vgpu *vgpu = spt->vgpu;
1010 struct intel_gvt_gtt_entry e;
1011 unsigned long index;
1014 trace_spt_change(spt->vgpu->id, "die", spt,
1015 spt->guest_page.gfn, spt->shadow_page.type);
1017 if (ppgtt_put_spt(spt) > 0)
1020 for_each_present_shadow_entry(spt, &e, index) {
1022 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1023 gvt_vdbg_mm("invalidate 4K entry\n");
1024 ppgtt_invalidate_pte(spt, &e);
1026 case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
			/* We don't set up 64K shadow entries so far. */
1028 WARN(1, "suspicious 64K gtt entry\n");
1030 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1031 gvt_vdbg_mm("invalidate 2M entry\n");
1033 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1034 WARN(1, "GVT doesn't support 1GB page\n");
1036 case GTT_TYPE_PPGTT_PML4_ENTRY:
1037 case GTT_TYPE_PPGTT_PDP_ENTRY:
1038 case GTT_TYPE_PPGTT_PDE_ENTRY:
1039 gvt_vdbg_mm("invalidate PMUL4/PDP/PDE entry\n");
1040 ret = ppgtt_invalidate_spt_by_shadow_entry(
1050 trace_spt_change(spt->vgpu->id, "release", spt,
1051 spt->guest_page.gfn, spt->shadow_page.type);
1052 ppgtt_free_spt(spt);
1055 gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
1056 spt, e.val64, e.type);
1060 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
1062 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1064 if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
1065 u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
1066 GAMW_ECO_ENABLE_64K_IPS_FIELD;
1068 return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
1069 } else if (INTEL_GEN(dev_priv) >= 11) {
1070 /* 64K paging only controlled by IPS bit in PTE now. */
1076 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
1078 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
1079 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
1081 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1082 struct intel_vgpu_ppgtt_spt *spt = NULL;
1086 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
1088 if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1089 ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
1091 spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
1095 if (ips != spt->guest_page.pde_ips) {
1096 spt->guest_page.pde_ips = ips;
1098 gvt_dbg_mm("reshadow PDE since ips changed\n");
1099 clear_page(spt->shadow_page.vaddr);
1100 ret = ppgtt_populate_spt(spt);
1107 int type = get_next_pt_type(we->type);
1109 if (!gtt_type_is_pt(type)) {
1114 spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
1120 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
1124 ret = ppgtt_populate_spt(spt);
1128 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
1129 spt->shadow_page.type);
1134 ppgtt_free_spt(spt);
1137 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1138 spt, we->val64, we->type);
1139 return ERR_PTR(ret);
1142 static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
1143 struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
1145 struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
1147 se->type = ge->type;
1148 se->val64 = ge->val64;
	/* Because we always split 64KB pages, clear the IPS bit in the shadow PDE. */
1151 if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1154 ops->set_pfn(se, s->shadow_page.mfn);
 * Check whether a 2MB huge gtt page can be used
1159 * @vgpu: target vgpu
1160 * @entry: target pfn's gtt entry
 * Return 1 if 2MB huge gtt shadowing is possible, 0 if it is not possible,
 * negative error code if an error is found.
1165 static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
1166 struct intel_gvt_gtt_entry *entry)
1168 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1171 if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
1174 pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
1175 if (pfn == INTEL_GVT_INVALID_ADDR)
1178 return PageTransHuge(pfn_to_page(pfn));
1181 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
1182 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1183 struct intel_gvt_gtt_entry *se)
1185 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1186 struct intel_vgpu_ppgtt_spt *sub_spt;
1187 struct intel_gvt_gtt_entry sub_se;
1188 unsigned long start_gfn;
1189 dma_addr_t dma_addr;
1190 unsigned long sub_index;
1193 gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
1195 start_gfn = ops->get_pfn(se);
1197 sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
1198 if (IS_ERR(sub_spt))
1199 return PTR_ERR(sub_spt);
1201 for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
1202 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
1203 start_gfn + sub_index, PAGE_SIZE, &dma_addr);
1206 sub_se.val64 = se->val64;
1208 /* Copy the PAT field from PDE. */
1209 sub_se.val64 &= ~_PAGE_PAT;
1210 sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
1212 ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
1213 ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
1216 /* Clear dirty field. */
1217 se->val64 &= ~_PAGE_DIRTY;
1221 ops->set_pfn(se, sub_spt->shadow_page.mfn);
1222 ppgtt_set_shadow_entry(spt, se, index);
	/* Cancel the existing address mappings of the DMA addr. */
1226 for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
1227 gvt_vdbg_mm("invalidate 4K entry\n");
1228 ppgtt_invalidate_pte(sub_spt, &sub_se);
1230 /* Release the new allocated spt. */
1231 trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
1232 sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
1233 ppgtt_free_spt(sub_spt);
1237 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
1238 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1239 struct intel_gvt_gtt_entry *se)
1241 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1242 struct intel_gvt_gtt_entry entry = *se;
1243 unsigned long start_gfn;
1244 dma_addr_t dma_addr;
1247 gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
1249 GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
1251 start_gfn = ops->get_pfn(se);
1253 entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
1254 ops->set_64k_splited(&entry);
1256 for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1257 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
1258 start_gfn + i, PAGE_SIZE, &dma_addr);
1262 ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
1263 ppgtt_set_shadow_entry(spt, &entry, index + i);
1268 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
1269 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1270 struct intel_gvt_gtt_entry *ge)
1272 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
1273 struct intel_gvt_gtt_entry se = *ge;
1274 unsigned long gfn, page_size = PAGE_SIZE;
1275 dma_addr_t dma_addr;
1278 if (!pte_ops->test_present(ge))
1281 gfn = pte_ops->get_pfn(ge);
1284 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1285 gvt_vdbg_mm("shadow 4K gtt entry\n");
1287 case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
1288 gvt_vdbg_mm("shadow 64K gtt entry\n");
		 * The layout of a 64K page is special: the page size is
		 * controlled by the upper PDE. To keep it simple, we always
		 * split a 64K page into 4K pages in the shadow page table.
1294 return split_64KB_gtt_entry(vgpu, spt, index, &se);
1295 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1296 gvt_vdbg_mm("shadow 2M gtt entry\n");
1297 ret = is_2MB_gtt_possible(vgpu, ge);
1299 return split_2MB_gtt_entry(vgpu, spt, index, &se);
1302 page_size = I915_GTT_PAGE_SIZE_2M;
1304 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1305 gvt_vgpu_err("GVT doesn't support 1GB entry\n");
1312 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
1317 pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
1318 ppgtt_set_shadow_entry(spt, &se, index);
1322 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
1324 struct intel_vgpu *vgpu = spt->vgpu;
1325 struct intel_gvt *gvt = vgpu->gvt;
1326 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1327 struct intel_vgpu_ppgtt_spt *s;
1328 struct intel_gvt_gtt_entry se, ge;
1329 unsigned long gfn, i;
1332 trace_spt_change(spt->vgpu->id, "born", spt,
1333 spt->guest_page.gfn, spt->shadow_page.type);
1335 for_each_present_guest_entry(spt, &ge, i) {
1336 if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
1337 s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1342 ppgtt_get_shadow_entry(spt, &se, i);
1343 ppgtt_generate_shadow_entry(&se, s, &ge);
1344 ppgtt_set_shadow_entry(spt, &se, i);
1346 gfn = ops->get_pfn(&ge);
1347 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
1348 ops->set_pfn(&se, gvt->gtt.scratch_mfn);
1349 ppgtt_set_shadow_entry(spt, &se, i);
1353 ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
1360 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1361 spt, ge.val64, ge.type);
1365 static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
1366 struct intel_gvt_gtt_entry *se, unsigned long index)
1368 struct intel_vgpu *vgpu = spt->vgpu;
1369 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1372 trace_spt_guest_change(spt->vgpu->id, "remove", spt,
1373 spt->shadow_page.type, se->val64, index);
1375 gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
1376 se->type, index, se->val64);
1378 if (!ops->test_present(se))
1381 if (ops->get_pfn(se) ==
1382 vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
1385 if (gtt_type_is_pt(get_next_pt_type(se->type))) {
1386 struct intel_vgpu_ppgtt_spt *s =
1387 intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
1389 gvt_vgpu_err("fail to find guest page\n");
1393 ret = ppgtt_invalidate_spt(s);
	/* We don't set up 64K shadow entries so far. */
1398 WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
1399 "suspicious 64K entry\n");
1400 ppgtt_invalidate_pte(spt, se);
1405 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1406 spt, se->val64, se->type);
1410 static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
1411 struct intel_gvt_gtt_entry *we, unsigned long index)
1413 struct intel_vgpu *vgpu = spt->vgpu;
1414 struct intel_gvt_gtt_entry m;
1415 struct intel_vgpu_ppgtt_spt *s;
1418 trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
1421 gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
1422 we->type, index, we->val64);
1424 if (gtt_type_is_pt(get_next_pt_type(we->type))) {
1425 s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
1430 ppgtt_get_shadow_entry(spt, &m, index);
1431 ppgtt_generate_shadow_entry(&m, s, we);
1432 ppgtt_set_shadow_entry(spt, &m, index);
1434 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
1440 gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
1441 spt, we->val64, we->type);
1445 static int sync_oos_page(struct intel_vgpu *vgpu,
1446 struct intel_vgpu_oos_page *oos_page)
1448 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1449 struct intel_gvt *gvt = vgpu->gvt;
1450 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1451 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1452 struct intel_gvt_gtt_entry old, new;
1456 trace_oos_change(vgpu->id, "sync", oos_page->id,
1457 spt, spt->guest_page.type);
1459 old.type = new.type = get_entry_type(spt->guest_page.type);
1460 old.val64 = new.val64 = 0;
1462 for (index = 0; index < (I915_GTT_PAGE_SIZE >>
1463 info->gtt_entry_size_shift); index++) {
1464 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
1465 ops->get_entry(NULL, &new, index, true,
1466 spt->guest_page.gfn << PAGE_SHIFT, vgpu);
1468 if (old.val64 == new.val64
1469 && !test_and_clear_bit(index, spt->post_shadow_bitmap))
1472 trace_oos_sync(vgpu->id, oos_page->id,
1473 spt, spt->guest_page.type,
1476 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
1480 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
1483 spt->guest_page.write_cnt = 0;
1484 list_del_init(&spt->post_shadow_list);
1488 static int detach_oos_page(struct intel_vgpu *vgpu,
1489 struct intel_vgpu_oos_page *oos_page)
1491 struct intel_gvt *gvt = vgpu->gvt;
1492 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1494 trace_oos_change(vgpu->id, "detach", oos_page->id,
1495 spt, spt->guest_page.type);
1497 spt->guest_page.write_cnt = 0;
1498 spt->guest_page.oos_page = NULL;
1499 oos_page->spt = NULL;
1501 list_del_init(&oos_page->vm_list);
1502 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
1507 static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
1508 struct intel_vgpu_ppgtt_spt *spt)
1510 struct intel_gvt *gvt = spt->vgpu->gvt;
1513 ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
1514 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
1515 oos_page->mem, I915_GTT_PAGE_SIZE);
1519 oos_page->spt = spt;
1520 spt->guest_page.oos_page = oos_page;
1522 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
1524 trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
1525 spt, spt->guest_page.type);
1529 static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
1531 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1534 ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
1538 trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
1539 spt, spt->guest_page.type);
1541 list_del_init(&oos_page->vm_list);
1542 return sync_oos_page(spt->vgpu, oos_page);
1545 static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
1547 struct intel_gvt *gvt = spt->vgpu->gvt;
1548 struct intel_gvt_gtt *gtt = &gvt->gtt;
1549 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	WARN(oos_page, "shadow PPGTT page already has an oos page\n");
1554 if (list_empty(>t->oos_page_free_list_head)) {
1555 oos_page = container_of(gtt->oos_page_use_list_head.next,
1556 struct intel_vgpu_oos_page, list);
1557 ret = ppgtt_set_guest_page_sync(oos_page->spt);
1560 ret = detach_oos_page(spt->vgpu, oos_page);
1564 oos_page = container_of(gtt->oos_page_free_list_head.next,
1565 struct intel_vgpu_oos_page, list);
1566 return attach_oos_page(oos_page, spt);
1569 static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
1571 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
1576 trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
1577 spt, spt->guest_page.type);
1579 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
1580 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
 * intel_vgpu_sync_oos_pages - sync all out-of-sync shadow pages for a vGPU
 * This function is called before submitting a guest workload to host,
 * to sync all the out-of-sync shadow pages for the vGPU.
1591 * Zero on success, negative error code if failed.
1593 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1595 struct list_head *pos, *n;
1596 struct intel_vgpu_oos_page *oos_page;
1599 if (!enable_out_of_sync)
1602 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
1603 oos_page = container_of(pos,
1604 struct intel_vgpu_oos_page, vm_list);
1605 ret = ppgtt_set_guest_page_sync(oos_page->spt);
1613 * The heart of PPGTT shadow page table.
1615 static int ppgtt_handle_guest_write_page_table(
1616 struct intel_vgpu_ppgtt_spt *spt,
1617 struct intel_gvt_gtt_entry *we, unsigned long index)
1619 struct intel_vgpu *vgpu = spt->vgpu;
1620 int type = spt->shadow_page.type;
1621 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1622 struct intel_gvt_gtt_entry old_se;
1626 new_present = ops->test_present(we);
	 * Add the new entry first and then remove the old one; this
	 * guarantees that the ppgtt table stays valid during the window
	 * between the add and the removal.
1633 ppgtt_get_shadow_entry(spt, &old_se, index);
1636 ret = ppgtt_handle_guest_entry_add(spt, we, index);
1641 ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
		/* For split 64KB entries, we need to clear them all. */
1647 if (ops->test_64k_splited(&old_se) &&
1648 !(index % GTT_64K_PTE_STRIDE)) {
1649 gvt_vdbg_mm("remove splited 64K shadow entries\n");
1650 for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1651 ops->clear_64k_splited(&old_se);
1652 ops->set_pfn(&old_se,
1653 vgpu->gtt.scratch_pt[type].page_mfn);
1654 ppgtt_set_shadow_entry(spt, &old_se, index + i);
1656 } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
1657 old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
1658 ops->clear_pse(&old_se);
1659 ops->set_pfn(&old_se,
1660 vgpu->gtt.scratch_pt[type].page_mfn);
1661 ppgtt_set_shadow_entry(spt, &old_se, index);
1663 ops->set_pfn(&old_se,
1664 vgpu->gtt.scratch_pt[type].page_mfn);
1665 ppgtt_set_shadow_entry(spt, &old_se, index);
1671 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
1672 spt, we->val64, we->type);
1678 static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
1680 return enable_out_of_sync
1681 && gtt_type_is_pte_pt(spt->guest_page.type)
1682 && spt->guest_page.write_cnt >= 2;
1685 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
1686 unsigned long index)
1688 set_bit(index, spt->post_shadow_bitmap);
1689 if (!list_empty(&spt->post_shadow_list))
1692 list_add_tail(&spt->post_shadow_list,
1693 &spt->vgpu->gtt.post_shadow_list_head);
1697 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
1700 * This function is called before submitting a guest workload to host,
1701 * to flush all the post shadows for a vGPU.
1704 * Zero on success, negative error code if failed.
1706 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1708 struct list_head *pos, *n;
1709 struct intel_vgpu_ppgtt_spt *spt;
1710 struct intel_gvt_gtt_entry ge;
1711 unsigned long index;
1714 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1715 spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
1718 for_each_set_bit(index, spt->post_shadow_bitmap,
1719 GTT_ENTRY_NUM_IN_ONE_PAGE) {
1720 ppgtt_get_guest_entry(spt, &ge, index);
1722 ret = ppgtt_handle_guest_write_page_table(spt,
1726 clear_bit(index, spt->post_shadow_bitmap);
1728 list_del_init(&spt->post_shadow_list);
1733 static int ppgtt_handle_guest_write_page_table_bytes(
1734 struct intel_vgpu_ppgtt_spt *spt,
1735 u64 pa, void *p_data, int bytes)
1737 struct intel_vgpu *vgpu = spt->vgpu;
1738 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1739 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1740 struct intel_gvt_gtt_entry we, se;
1741 unsigned long index;
1744 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
1746 ppgtt_get_guest_entry(spt, &we, index);
	/*
	 * For a page table that holds 64K gtt entries, only PTE#0, PTE#16,
	 * PTE#32, ... PTE#496 are used. Updates to the unused PTEs should be
	 * ignored.
	 */
1753 if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
1754 (index % GTT_64K_PTE_STRIDE)) {
1755 gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
1760 if (bytes == info->gtt_entry_size) {
1761 ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
1765 if (!test_bit(index, spt->post_shadow_bitmap)) {
1766 int type = spt->shadow_page.type;
1768 ppgtt_get_shadow_entry(spt, &se, index);
1769 ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
1772 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1773 ppgtt_set_shadow_entry(spt, &se, index);
1775 ppgtt_set_post_shadow(spt, index);
1778 if (!enable_out_of_sync)
1781 spt->guest_page.write_cnt++;
1783 if (spt->guest_page.oos_page)
1784 ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
1787 if (can_do_out_of_sync(spt)) {
1788 if (!spt->guest_page.oos_page)
1789 ppgtt_allocate_oos_page(spt);
1791 ret = ppgtt_set_guest_page_oos(spt);
1798 static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
1800 struct intel_vgpu *vgpu = mm->vgpu;
1801 struct intel_gvt *gvt = vgpu->gvt;
1802 struct intel_gvt_gtt *gtt = &gvt->gtt;
1803 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1804 struct intel_gvt_gtt_entry se;
1807 if (!mm->ppgtt_mm.shadowed)
1810 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
1811 ppgtt_get_shadow_root_entry(mm, &se, index);
1813 if (!ops->test_present(&se))
1816 ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
1818 ppgtt_set_shadow_root_entry(mm, &se, index);
1820 trace_spt_guest_change(vgpu->id, "destroy root pointer",
1821 NULL, se.type, se.val64, index);
1824 mm->ppgtt_mm.shadowed = false;
1828 static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
1830 struct intel_vgpu *vgpu = mm->vgpu;
1831 struct intel_gvt *gvt = vgpu->gvt;
1832 struct intel_gvt_gtt *gtt = &gvt->gtt;
1833 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1834 struct intel_vgpu_ppgtt_spt *spt;
1835 struct intel_gvt_gtt_entry ge, se;
1838 if (mm->ppgtt_mm.shadowed)
1841 mm->ppgtt_mm.shadowed = true;
1843 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
1844 ppgtt_get_guest_root_entry(mm, &ge, index);
1846 if (!ops->test_present(&ge))
1849 trace_spt_guest_change(vgpu->id, __func__, NULL,
1850 ge.type, ge.val64, index);
1852 spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1854 gvt_vgpu_err("fail to populate guest root pointer\n");
1858 ppgtt_generate_shadow_entry(&se, spt, &ge);
1859 ppgtt_set_shadow_root_entry(mm, &se, index);
1861 trace_spt_guest_change(vgpu->id, "populate root pointer",
1862 NULL, se.type, se.val64, index);
1867 invalidate_ppgtt_mm(mm);
1871 static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
1873 struct intel_vgpu_mm *mm;
1875 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1880 kref_init(&mm->ref);
1881 atomic_set(&mm->pincount, 0);
1886 static void vgpu_free_mm(struct intel_vgpu_mm *mm)
1892 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
1894 * @root_entry_type: ppgtt root entry type
1895 * @pdps: guest pdps.
1897 * This function is used to create a ppgtt mm object for a vGPU.
 * The ppgtt mm object on success, an ERR_PTR-encoded negative error code if failed.
1902 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1903 enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
1905 struct intel_gvt *gvt = vgpu->gvt;
1906 struct intel_vgpu_mm *mm;
1909 mm = vgpu_alloc_mm(vgpu);
1911 return ERR_PTR(-ENOMEM);
1913 mm->type = INTEL_GVT_MM_PPGTT;
1915 GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
1916 root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
1917 mm->ppgtt_mm.root_entry_type = root_entry_type;
1919 INIT_LIST_HEAD(&mm->ppgtt_mm.list);
1920 INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
1921 INIT_LIST_HEAD(&mm->ppgtt_mm.link);
1923 if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
1924 mm->ppgtt_mm.guest_pdps[0] = pdps[0];
1926 memcpy(mm->ppgtt_mm.guest_pdps, pdps,
1927 sizeof(mm->ppgtt_mm.guest_pdps));
1929 ret = shadow_ppgtt_mm(mm);
1931 gvt_vgpu_err("failed to shadow ppgtt mm\n");
1933 return ERR_PTR(ret);
1936 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1938 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
1939 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
1940 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1945 static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1947 struct intel_vgpu_mm *mm;
1948 unsigned long nr_entries;
1950 mm = vgpu_alloc_mm(vgpu);
1952 return ERR_PTR(-ENOMEM);
1954 mm->type = INTEL_GVT_MM_GGTT;
1956 nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
1957 mm->ggtt_mm.virtual_ggtt =
1958 vzalloc(array_size(nr_entries,
1959 vgpu->gvt->device_info.gtt_entry_size));
1960 if (!mm->ggtt_mm.virtual_ggtt) {
1962 return ERR_PTR(-ENOMEM);
1965 mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1966 if (!mm->ggtt_mm.host_ggtt_aperture) {
1967 vfree(mm->ggtt_mm.virtual_ggtt);
1969 return ERR_PTR(-ENOMEM);
1972 mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1973 if (!mm->ggtt_mm.host_ggtt_hidden) {
1974 vfree(mm->ggtt_mm.host_ggtt_aperture);
1975 vfree(mm->ggtt_mm.virtual_ggtt);
1977 return ERR_PTR(-ENOMEM);
1984 * _intel_vgpu_mm_release - destroy a mm object
1985 * @mm_ref: a kref object
1987 * This function is used to destroy a mm object for vGPU
1990 void _intel_vgpu_mm_release(struct kref *mm_ref)
1992 struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
1994 if (GEM_WARN_ON(atomic_read(&mm->pincount)))
1995 gvt_err("vgpu mm pin count bug detected\n");
1997 if (mm->type == INTEL_GVT_MM_PPGTT) {
1998 list_del(&mm->ppgtt_mm.list);
2000 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2001 list_del(&mm->ppgtt_mm.lru_list);
2002 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2004 invalidate_ppgtt_mm(mm);
2006 vfree(mm->ggtt_mm.virtual_ggtt);
2007 vfree(mm->ggtt_mm.host_ggtt_aperture);
2008 vfree(mm->ggtt_mm.host_ggtt_hidden);
2015 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
2016 * @mm: a vGPU mm object
 * This function is called when a user no longer needs a vGPU mm object.
2020 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
2022 atomic_dec_if_positive(&mm->pincount);
2026 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
2027 * @mm: target vgpu mm
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
2034 * Zero on success, negative error code if failed.
2036 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
2040 atomic_inc(&mm->pincount);
2042 if (mm->type == INTEL_GVT_MM_PPGTT) {
2043 ret = shadow_ppgtt_mm(mm);
2047 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2048 list_move_tail(&mm->ppgtt_mm.lru_list,
2049 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
2050 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2056 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
2058 struct intel_vgpu_mm *mm;
2059 struct list_head *pos, *n;
2061 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
2063 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
2064 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
2066 if (atomic_read(&mm->pincount))
2069 list_del_init(&mm->ppgtt_mm.lru_list);
2070 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2071 invalidate_ppgtt_mm(mm);
2074 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2079 * GMA translation APIs.
2081 static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
2082 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
2084 struct intel_vgpu *vgpu = mm->vgpu;
2085 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2086 struct intel_vgpu_ppgtt_spt *s;
2088 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
2093 ppgtt_get_shadow_entry(s, e, index);
2095 ppgtt_get_guest_entry(s, e, index);
2100 * intel_vgpu_gma_to_gpa - translate a gma to GPA
2101 * @mm: mm object. could be a PPGTT or GGTT mm object
2102 * @gma: graphics memory address in this mm object
2104 * This function is used to translate a graphics memory address in specific
2105 * graphics memory space to guest physical address.
2108 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
2110 unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
2112 struct intel_vgpu *vgpu = mm->vgpu;
2113 struct intel_gvt *gvt = vgpu->gvt;
2114 struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
2115 struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
2116 unsigned long gpa = INTEL_GVT_INVALID_ADDR;
2117 unsigned long gma_index[4];
2118 struct intel_gvt_gtt_entry e;
2122 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
2123 mm->type != INTEL_GVT_MM_PPGTT);
2125 if (mm->type == INTEL_GVT_MM_GGTT) {
2126 if (!vgpu_gmadr_is_valid(vgpu, gma))
2129 ggtt_get_guest_entry(mm, &e,
2130 gma_ops->gma_to_ggtt_pte_index(gma));
2132 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
2133 + (gma & ~I915_GTT_PAGE_MASK);
2135 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
2137 switch (mm->ppgtt_mm.root_entry_type) {
2138 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2139 ppgtt_get_shadow_root_entry(mm, &e, 0);
2141 gma_index[0] = gma_ops->gma_to_pml4_index(gma);
2142 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
2143 gma_index[2] = gma_ops->gma_to_pde_index(gma);
2144 gma_index[3] = gma_ops->gma_to_pte_index(gma);
2147 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2148 ppgtt_get_shadow_root_entry(mm, &e,
2149 gma_ops->gma_to_l3_pdp_index(gma));
2151 gma_index[0] = gma_ops->gma_to_pde_index(gma);
2152 gma_index[1] = gma_ops->gma_to_pte_index(gma);
2159 /* walk the shadow page table and get gpa from guest entry */
2160 for (i = 0; i < levels; i++) {
2161 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
2166 if (!pte_ops->test_present(&e)) {
2167 gvt_dbg_core("GMA 0x%lx is not present\n", gma);
2172 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
2173 (gma & ~I915_GTT_PAGE_MASK);
2174 trace_gma_translate(vgpu->id, "ppgtt", 0,
2175 mm->ppgtt_mm.root_entry_type, gma, gpa);
2180 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
2181 return INTEL_GVT_INVALID_ADDR;
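/*
 * Illustrative usage sketch only (the helper below is hypothetical and not
 * part of the driver): translate a graphics memory address and turn the
 * INTEL_GVT_INVALID_ADDR sentinel into an errno-style return value.
 */
static inline int example_gma_to_gpa(struct intel_vgpu_mm *mm,
				     unsigned long gma, unsigned long *gpa)
{
	*gpa = intel_vgpu_gma_to_gpa(mm, gma);

	return (*gpa == INTEL_GVT_INVALID_ADDR) ? -EINVAL : 0;
}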
2184 static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
2185 unsigned int off, void *p_data, unsigned int bytes)
2187 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2188 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2189 unsigned long index = off >> info->gtt_entry_size_shift;
2191 struct intel_gvt_gtt_entry e;
2193 if (bytes != 4 && bytes != 8)
2196 gma = index << I915_GTT_PAGE_SHIFT;
2197 if (!intel_gvt_ggtt_validate_range(vgpu,
2198 gma, 1 << I915_GTT_PAGE_SHIFT)) {
2199 gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
2200 memset(p_data, 0, bytes);
2204 ggtt_get_guest_entry(ggtt_mm, &e, index);
2205 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
 * intel_vgpu_emulate_ggtt_mmio_read - emulate GGTT MMIO register read
2213 * @off: register offset
2214 * @p_data: data will be returned to guest
2215 * @bytes: data length
2217 * This function is used to emulate the GTT MMIO register read
2220 * Zero on success, error code if failed.
2222 int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
2223 void *p_data, unsigned int bytes)
2225 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2228 if (bytes != 4 && bytes != 8)
2231 off -= info->gtt_start_offset;
2232 ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
2236 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
2237 struct intel_gvt_gtt_entry *entry)
2239 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2242 pfn = pte_ops->get_pfn(entry);
2243 if (pfn != vgpu->gvt->gtt.scratch_mfn)
2244 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
2248 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2249 void *p_data, unsigned int bytes)
2251 struct intel_gvt *gvt = vgpu->gvt;
2252 const struct intel_gvt_device_info *info = &gvt->device_info;
2253 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2254 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
2255 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
2256 unsigned long gma, gfn;
2257 struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2258 struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2259 dma_addr_t dma_addr;
2261 struct intel_gvt_partial_pte *partial_pte, *pos, *n;
2262 bool partial_update = false;
2264 if (bytes != 4 && bytes != 8)
2267 gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
2269 /* the VM may configure the whole GM space when ballooning is used */
2270 if (!vgpu_gmadr_is_valid(vgpu, gma))
2273 e.type = GTT_TYPE_GGTT_PTE;
2274 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
2277 /* If the ggtt entry size is 8 bytes and the write is split into two
2278 * 4-byte writes, save the first 4 bytes in a list and update the virtual
2279 * PTE. Only update the shadow PTE when the second 4-byte write arrives.
2281 if (bytes < info->gtt_entry_size) {
2284 list_for_each_entry_safe(pos, n,
2285 &ggtt_mm->ggtt_mm.partial_pte_list, list) {
2286 if (g_gtt_index == pos->offset >>
2287 info->gtt_entry_size_shift) {
2288 if (off != pos->offset) {
2289 /* the second partial part */
2290 int last_off = pos->offset &
2291 (info->gtt_entry_size - 1);
2293 memcpy((void *)&e.val64 + last_off,
2294 (void *)&pos->data + last_off,
2297 list_del(&pos->list);
2303 /* update of the first partial part */
2304 pos->data = e.val64;
2305 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2311 /* the first partial part */
2312 partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
2315 partial_pte->offset = off;
2316 partial_pte->data = e.val64;
2317 list_add_tail(&partial_pte->list,
2318 &ggtt_mm->ggtt_mm.partial_pte_list);
2319 partial_update = true;
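/* Only once a complete, present guest entry is available is the guest
 * page pinned and a matching shadow (host) entry built.
 */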
2323 if (!partial_update && (ops->test_present(&e))) {
2324 gfn = ops->get_pfn(&e);
2328 /* one PTE update may be issued in multiple writes, and the
2329 * first write may not yet construct a valid gfn
2331 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
2332 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2336 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
2337 PAGE_SIZE, &dma_addr);
2339 gvt_vgpu_err("fail to populate guest ggtt entry\n");
2340 /* The guest driver may read/write the entry while it is only
2341 * partially updated; in that case the p2m mapping can fail, so
2342 * set the shadow entry to point to a scratch page.
2344 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2346 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
2348 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2349 ops->clear_present(&m);
2353 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
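/* Tear down whatever the old host entry mapped, then install the new one
 * and flush the GGTT so hardware sees it.
 */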
2355 ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
2356 ggtt_invalidate_pte(vgpu, &e);
2358 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
2359 ggtt_invalidate(gvt->gt);
2364 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
2366 * @off: register offset
2367 * @p_data: data from guest write
2368 * @bytes: data length
2370 * This function is used to emulate the GTT MMIO register write
2373 * Zero on success, error code if failed.
2375 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
2376 unsigned int off, void *p_data, unsigned int bytes)
2378 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2380 struct intel_vgpu_submission *s = &vgpu->submission;
2381 struct intel_engine_cs *engine;
2384 if (bytes != 4 && bytes != 8)
2387 off -= info->gtt_start_offset;
2388 ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
2390 /* If the ggtt entry of the last submitted context is written,
2391 * that context has probably been unpinned.
2392 * Mark the last shadowed ctx as invalid.
2394 for_each_engine(engine, vgpu->gvt->gt, i) {
2395 if (!s->last_ctx[i].valid)
2398 if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
2399 s->last_ctx[i].valid = false;
2404 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
2405 enum intel_gvt_gtt_type type)
2407 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
2408 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2409 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2410 int page_entry_num = I915_GTT_PAGE_SIZE >>
2411 vgpu->gvt->device_info.gtt_entry_size_shift;
2414 struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
2417 if (drm_WARN_ON(&i915->drm,
2418 type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
2421 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
2423 gvt_vgpu_err("fail to allocate scratch page\n");
2427 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
2428 4096, PCI_DMA_BIDIRECTIONAL);
2429 if (dma_mapping_error(dev, daddr)) {
2430 gvt_vgpu_err("fail to dmamap scratch_pt\n");
2431 __free_page(virt_to_page(scratch_pt));
2434 gtt->scratch_pt[type].page_mfn =
2435 (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2436 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
2437 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
2438 vgpu->id, type, gtt->scratch_pt[type].page_mfn);
2440 /* Build the tree by filling the scratch pt with entries that
2441 * point to the next-level scratch pt or the scratch page. The
2442 * scratch_pt[type] indicates the scratch pt/scratch page used by the shadow page table of the given type.
2444 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
2445 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
2446 * is GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch page mfn.
2448 if (type > GTT_TYPE_PPGTT_PTE_PT) {
2449 struct intel_gvt_gtt_entry se;
2451 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
2452 se.type = get_entry_type(type - 1);
2453 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
2455 /* The entry parameters (present/writable/cache type) are
2456 * set the same as in i915's scratch page tree.
2458 se.val64 |= _PAGE_PRESENT | _PAGE_RW;
2459 if (type == GTT_TYPE_PPGTT_PDE_PT)
2460 se.val64 |= PPAT_CACHED;
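/* Fill every slot of this scratch table with an entry pointing to the
 * next level's scratch table/page.
 */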
2462 for (i = 0; i < page_entry_num; i++)
2463 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
2469 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
2472 struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
2475 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2476 if (vgpu->gtt.scratch_pt[i].page != NULL) {
2477 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
2478 I915_GTT_PAGE_SHIFT);
2479 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
2480 __free_page(vgpu->gtt.scratch_pt[i].page);
2481 vgpu->gtt.scratch_pt[i].page = NULL;
2482 vgpu->gtt.scratch_pt[i].page_mfn = 0;
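/* create_scratch_page_tree() allocates one scratch page table per PPGTT
 * level and unwinds everything already allocated if any level fails.
 */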
2489 static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2493 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2494 ret = alloc_scratch_pages(vgpu, i);
2502 release_scratch_page_tree(vgpu);
2507 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2510 * This function is used to initialize per-vGPU graphics memory virtualization
2514 * Zero on success, error code if failed.
2516 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2518 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2520 INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);
2522 INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
2523 INIT_LIST_HEAD(&gtt->oos_page_list_head);
2524 INIT_LIST_HEAD(&gtt->post_shadow_list_head);
2526 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2527 if (IS_ERR(gtt->ggtt_mm)) {
2528 gvt_vgpu_err("fail to create mm for ggtt.\n");
2529 return PTR_ERR(gtt->ggtt_mm);
2532 intel_vgpu_reset_ggtt(vgpu, false);
2534 INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
2536 return create_scratch_page_tree(vgpu);
2539 void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2541 struct list_head *pos, *n;
2542 struct intel_vgpu_mm *mm;
2544 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2545 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2546 intel_vgpu_destroy_mm(mm);
2549 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2550 gvt_err("vgpu ppgtt mm is not fully destroyed\n");
2552 if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
2553 gvt_err("Why do we still have SPTs not freed?\n");
2554 ppgtt_free_all_spt(vgpu);
2558 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2560 struct intel_gvt_partial_pte *pos, *next;
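/* Drop any partial GGTT PTE writes that never received their second
 * 4-byte half before destroying the GGTT mm.
 */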
2562 list_for_each_entry_safe(pos, next,
2563 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2565 gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
2566 pos->offset, pos->data);
2569 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2570 vgpu->gtt.ggtt_mm = NULL;
2574 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2577 * This function is used to clean up per-vGPU graphics memory virtualization
2583 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2585 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2586 intel_vgpu_destroy_ggtt_mm(vgpu);
2587 release_scratch_page_tree(vgpu);
2590 static void clean_spt_oos(struct intel_gvt *gvt)
2592 struct intel_gvt_gtt *gtt = &gvt->gtt;
2593 struct list_head *pos, *n;
2594 struct intel_vgpu_oos_page *oos_page;
2596 WARN(!list_empty(&gtt->oos_page_use_list_head),
2597 "someone is still using oos page\n");
2599 list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
2600 oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2601 list_del(&oos_page->list);
2602 free_page((unsigned long)oos_page->mem);
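/* setup_spt_oos() preallocates the pool of out-of-sync (OOS) page buffers
 * used when enable_out_of_sync is set.
 */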
2607 static int setup_spt_oos(struct intel_gvt *gvt)
2609 struct intel_gvt_gtt *gtt = &gvt->gtt;
2610 struct intel_vgpu_oos_page *oos_page;
2614 INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
2615 INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
2617 for (i = 0; i < preallocated_oos_pages; i++) {
2618 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2623 oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
2624 if (!oos_page->mem) {
2630 INIT_LIST_HEAD(&oos_page->list);
2631 INIT_LIST_HEAD(&oos_page->vm_list);
2633 list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
2636 gvt_dbg_mm("%d oos pages preallocated\n", i);
2645 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2647 * @pdps: pdp root array
2649 * This function is used to find a PPGTT mm object from the mm object pool
2652 * pointer to mm object on success, NULL if failed.
2654 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2657 struct intel_vgpu_mm *mm;
2658 struct list_head *pos;
2660 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2661 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
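/* A 4-level mm is identified by its single PML4 root, a 3-level mm by
 * comparing all of its guest PDP entries.
 */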
2663 switch (mm->ppgtt_mm.root_entry_type) {
2664 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2665 if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
2668 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2669 if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
2670 sizeof(mm->ppgtt_mm.guest_pdps)))
2681 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
2683 * @root_entry_type: ppgtt root entry type
2686 * This function is used to find or create a PPGTT mm object from a guest.
2689 * pointer to mm object on success, ERR_PTR if failed.
2691 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
2692 enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
2694 struct intel_vgpu_mm *mm;
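/* Reuse an existing mm (taking a reference) if the guest already
 * registered these pdps; otherwise create one.
 */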
2696 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2698 intel_vgpu_mm_get(mm);
2700 mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
2702 gvt_vgpu_err("fail to create mm\n");
2708 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
2712 * This function is used to find a PPGTT mm object from a guest and destroy it.
2715 * Zero on success, negative error code if failed.
2717 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
2719 struct intel_vgpu_mm *mm;
2721 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2723 gvt_vgpu_err("fail to find ppgtt instance.\n");
2726 intel_vgpu_mm_put(mm);
2731 * intel_gvt_init_gtt - initialize mm components of a GVT device
2734 * This function is called at the initialization stage, to initialize
2735 * the mm components of a GVT device.
2738 * zero on success, negative error code if failed.
2740 int intel_gvt_init_gtt(struct intel_gvt *gvt)
2744 struct device *dev = &gvt->gt->i915->drm.pdev->dev;
2747 gvt_dbg_core("init gtt\n");
2749 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2750 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
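/* Allocate and DMA-map a single global scratch page; unused or invalid
 * GGTT entries are pointed at its mfn.
 */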
2752 page = (void *)get_zeroed_page(GFP_KERNEL);
2754 gvt_err("fail to allocate scratch ggtt page\n");
2758 daddr = dma_map_page(dev, virt_to_page(page), 0,
2759 4096, PCI_DMA_BIDIRECTIONAL);
2760 if (dma_mapping_error(dev, daddr)) {
2761 gvt_err("fail to dmamap scratch ggtt page\n");
2762 __free_page(virt_to_page(page));
2766 gvt->gtt.scratch_page = virt_to_page(page);
2767 gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2769 if (enable_out_of_sync) {
2770 ret = setup_spt_oos(gvt);
2772 gvt_err("fail to initialize SPT oos\n");
2773 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
2774 __free_page(gvt->gtt.scratch_page);
2778 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
2779 mutex_init(&gvt->gtt.ppgtt_mm_lock);
2784 * intel_gvt_clean_gtt - clean up mm components of a GVT device
2787 * This function is called at the driver unloading stage, to clean up the
2788 * mm components of a GVT device.
2791 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2793 struct device *dev = &gvt->gt->i915->drm.pdev->dev;
2794 dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
2795 I915_GTT_PAGE_SHIFT);
2797 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
2799 __free_page(gvt->gtt.scratch_page);
2801 if (enable_out_of_sync)
2806 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
2809 * This function is called to invalidate all PPGTT instances of a vGPU.
2812 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2814 struct list_head *pos, *n;
2815 struct intel_vgpu_mm *mm;
2817 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2818 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2819 if (mm->type == INTEL_GVT_MM_PPGTT) {
2820 mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2821 list_del_init(&mm->ppgtt_mm.lru_list);
2822 mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2823 if (mm->ppgtt_mm.shadowed)
2824 invalidate_ppgtt_mm(mm);
2830 * intel_vgpu_reset_ggtt - reset the GGTT entry
2832 * @invalidate_old: invalidate old entries
2834 * This function is called at the vGPU create stage
2835 * to reset all the GGTT entries.
2838 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
2840 struct intel_gvt *gvt = vgpu->gvt;
2841 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2842 struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
2843 struct intel_gvt_gtt_entry old_entry;
2847 pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
2848 pte_ops->set_present(&entry);
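/* Point every entry in both the aperture and hidden ranges at the
 * scratch page, optionally invalidating what the old entries mapped.
 */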
2850 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2851 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2852 while (num_entries--) {
2853 if (invalidate_old) {
2854 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2855 ggtt_invalidate_pte(vgpu, &old_entry);
2857 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2860 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2861 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2862 while (num_entries--) {
2863 if (invalidate_old) {
2864 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2865 ggtt_invalidate_pte(vgpu, &old_entry);
2867 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2870 ggtt_invalidate(gvt->gt);
2874 * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
2875 * @gvt: intel gvt device
2877 * This function is called at the driver resume stage to restore the
2878 * GGTT entries of every vGPU.
2881 void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
2883 struct intel_vgpu *vgpu;
2884 struct intel_vgpu_mm *mm;
2887 u32 idx, num_low, num_hi, offset;
2889 /* Restore dirty host ggtt for all vGPUs */
2890 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
2891 mm = vgpu->gtt.ggtt_mm;
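/* Rewrite only the host entries that were present: first the aperture
 * (low) range, then the hidden (high) range.
 */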
2893 num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2894 offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2895 for (idx = 0; idx < num_low; idx++) {
2896 pte = mm->ggtt_mm.host_ggtt_aperture[idx];
2897 if (pte & _PAGE_PRESENT)
2898 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
2901 num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2902 offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2903 for (idx = 0; idx < num_hi; idx++) {
2904 pte = mm->ggtt_mm.host_ggtt_hidden[idx];
2905 if (pte & _PAGE_PRESENT)
2906 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);