/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

#define MAX_KIQ_REG_WAIT		5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY			20

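/**
 * amdgpu_csa_vaddr() - compute the GPU virtual address of the static CSA
 * @adev: amdgpu device
 *
 * The CSA lives at the top of the VM space: start from the highest GPU
 * page, back off the reserved VA range, and sign-extend the result into
 * the canonical upper half when it falls above the VA hole.
 *
 * Worked example (assuming a 48-bit VA space; the real constants come
 * from amdgpu.h):
 *
 *	addr = max_pfn << AMDGPU_GPU_PAGE_SHIFT;  // 1ULL << 48
 *	addr -= AMDGPU_VA_RESERVED_SIZE;          // just below 1ULL << 48
 *	addr |= AMDGPU_VA_HOLE_END;               // ORs in 0xffff800000000000,
 *	                                          // sign-extending bit 47
 */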
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
        uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;

        addr -= AMDGPU_VA_RESERVED_SIZE;

        if (addr >= AMDGPU_VA_HOLE_START)
                addr |= AMDGPU_VA_HOLE_END;

        return addr;
}

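/**
 * amdgpu_virt_mmio_blocked() - check whether MMIO access is blocked
 * @adev: amdgpu device
 *
 * Return: true if reading SCRATCH_REG0 (0xc040) yields all ones, which
 * means the hypervisor has blocked MMIO access for this VF.
 */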
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
        /*
         * By now all MMIO pages except the mailbox are blocked if
         * blocking is enabled in the hypervisor. Use SCRATCH_REG0
         * to test.
         */
        return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

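/**
 * amdgpu_allocate_static_csa() - allocate and clear the static CSA BO
 * @adev: amdgpu device
 *
 * Creates a kernel BO of AMDGPU_CSA_SIZE in VRAM with a permanent CPU
 * mapping and zeroes its contents.
 *
 * Return: 0 on success, negative error code on failure.
 */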
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
        int r;
        void *ptr;

        r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
                                    &adev->virt.csa_vmid0_addr, &ptr);
        if (r)
                return r;

        memset(ptr, 0, AMDGPU_CSA_SIZE);
        return 0;
}

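/**
 * amdgpu_free_static_csa() - free the static CSA BO
 * @adev: amdgpu device
 */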
void amdgpu_free_static_csa(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->virt.csa_obj,
                              &adev->virt.csa_vmid0_addr,
                              NULL);
}

/**
 * amdgpu_map_static_csa() - map the static CSA into a VM
 * @adev: amdgpu device
 * @vm: VM to map the CSA into
 * @bo_va: on success, the bo_va created for the mapping
 *
 * Should be called during amdgpu_vm_init(). Maps the virtual address
 * returned by amdgpu_csa_vaddr() into this VM; each GFX command
 * submission should use that address in its META_DATA init package to
 * support SR-IOV GFX preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                          struct amdgpu_bo_va **bo_va)
{
        uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        struct amdgpu_bo_list_entry pd;
        struct ttm_validate_buffer csa_tv;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&csa_tv.head);
        csa_tv.bo = &adev->virt.csa_obj->tbo;
        csa_tv.shared = true;

        list_add(&csa_tv.head, &list);
        amdgpu_vm_get_pd_bo(vm, &list, &pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r) {
                DRM_ERROR("failed to reserve CSA, PD BOs: err=%d\n", r);
                return r;
        }

        *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
        if (!*bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for static CSA\n");
                return -ENOMEM;
        }

        r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
                                AMDGPU_CSA_SIZE);
        if (r) {
                DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
                amdgpu_vm_bo_rmv(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
                             AMDGPU_PTE_EXECUTABLE);
        if (r) {
                DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
                amdgpu_vm_bo_rmv(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        ttm_eu_backoff_reservation(&ticket, &list);
        return 0;
}

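/*
 * Illustrative usage sketch (an assumption, not a verbatim call site;
 * the CSA is typically mapped from the KMS open path on SR-IOV VFs):
 *
 *	struct amdgpu_bo_va *csa_va;
 *	int r;
 *
 *	if (amdgpu_sriov_vf(adev)) {
 *		r = amdgpu_map_static_csa(adev, &fpriv->vm, &csa_va);
 *		if (r)
 *			goto error_vm;
 *	}
 */

/**
 * amdgpu_virt_init_setting() - apply common SR-IOV VF settings
 * @adev: amdgpu device
 *
 * A VF has no dedicated display hardware, so expose a single virtual
 * display CRTC and clear the clockgating/powergating flags.
 */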
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
        /* enable virtual display */
        adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;
        adev->cg_flags = 0;
        adev->pg_flags = 0;
}

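/**
 * amdgpu_virt_kiq_rreg() - read a register through the KIQ
 * @adev: amdgpu device
 * @reg: register offset to read
 *
 * On SR-IOV VFs most registers cannot be accessed via MMIO directly, so
 * the read is emitted as a packet on the Kernel Interface Queue (KIQ)
 * ring together with a polling fence; once the fence signals, the value
 * is fetched from the writeback slot reserved for register reads.
 *
 * Return: the register value, or ~0 on failure.
 */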
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_rreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_rreg(ring, reg);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* Don't wait any longer in the GPU reset case, because waiting
         * here may block the gpu_recover() routine forever: e.g. this
         * virt_kiq_rreg may be triggered from TTM, and
         * ttm_bo_lock_delayed_workqueue() will never return if we keep
         * waiting here, which makes gpu_recover() hang.
         *
         * Also don't wait any longer when called from IRQ context.
         */
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
                goto failed_kiq_read;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_read;

        return adev->wb.wb[adev->virt.reg_val_offs];

failed_kiq_read:
        pr_err("failed to read reg:%x\n", reg);
        return ~0;
}

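/**
 * amdgpu_virt_kiq_wreg() - write a register through the KIQ
 * @adev: amdgpu device
 * @reg: register offset to write
 * @v: value to write
 *
 * Mirrors amdgpu_virt_kiq_rreg(): the write is emitted as a packet on
 * the KIQ ring and a polling fence is used to confirm completion.
 */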
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_wreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* Don't wait any longer in the GPU reset case, because waiting
         * here may block the gpu_recover() routine forever: e.g. this
         * virt_kiq_wreg may be triggered from TTM, and
         * ttm_bo_lock_delayed_workqueue() will never return if we keep
         * waiting here, which makes gpu_recover() hang.
         *
         * Also don't wait any longer when called from IRQ context.
         */
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
                goto failed_kiq_write;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_write;

        return;

failed_kiq_write:
        pr_err("failed to write reg:%x\n", reg);
}

/**
 * amdgpu_virt_request_full_gpu() - request full GPU access
 * @adev: amdgpu device
 * @init: whether this is called at driver init time
 *
 * Full GPU access must be requested before starting driver init/fini.
 *
 * Return: 0 on success, negative error code on failure.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->req_full_gpu) {
                r = virt->ops->req_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

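/*
 * The virt->ops backend is provided per ASIC by the mailbox code. A
 * sketch of the wiring (member names follow struct amdgpu_virt_ops; see
 * mxgpu_ai.c / mxgpu_vi.c for the real tables):
 *
 *	const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
 *		.req_full_gpu	= xgpu_ai_request_full_gpu_access,
 *		.rel_full_gpu	= xgpu_ai_release_full_gpu_access,
 *		.reset_gpu	= xgpu_ai_request_reset,
 *		.wait_reset	= NULL,
 *		.trans_msg	= xgpu_ai_mailbox_trans_msg,
 *	};
 */
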
/**
 * amdgpu_virt_release_full_gpu() - release full GPU access
 * @adev: amdgpu device
 * @init: whether this is called at driver init time
 *
 * Full GPU access must be released when driver init/fini finishes.
 *
 * Return: 0 on success, negative error code on failure.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->rel_full_gpu) {
                r = virt->ops->rel_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset the GPU
 * @adev: amdgpu device
 *
 * Sends a reset command to the GPU hypervisor to reset the GPU this VM
 * is using.
 *
 * Return: 0 on success, negative error code on failure.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->reset_gpu) {
                r = virt->ops->reset_gpu(adev);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for a GPU reset to complete
 * @adev: amdgpu device
 *
 * Return: 0 on success, negative error code on failure.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (!virt->ops || !virt->ops->wait_reset)
                return -EINVAL;

        return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - allocate memory for the MM table
 * @adev: amdgpu device
 *
 * The MM table is used by UVD and VCE during their initialization.
 *
 * Return: 0 on success, negative error code on failure.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
                return 0;

        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->virt.mm_table.bo,
                                    &adev->virt.mm_table.gpu_addr,
                                    (void *)&adev->virt.mm_table.cpu_addr);
        if (r) {
                DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
                return r;
        }

        memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
        DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
                 adev->virt.mm_table.gpu_addr,
                 adev->virt.mm_table.cpu_addr);
        return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free the MM table memory
 * @adev: amdgpu device
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
                return;

        amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
                              &adev->virt.mm_table.gpu_addr,
                              (void *)&adev->virt.mm_table.cpu_addr);
        adev->virt.mm_table.gpu_addr = 0;
}

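/**
 * amdgpu_virt_fw_reserve_get_checksum() - checksum a pf2vf/vf2pf message
 * @obj: buffer holding the message
 * @obj_size: size of @obj in bytes
 * @key: per-device checksum key
 * @chksum: checksum value currently stored inside @obj (0 when generating)
 *
 * The checksum is @key plus the byte sum of @obj; the bytes of @chksum
 * are subtracted so the stored checksum field does not influence the
 * result. To verify a received message, pass the stored checksum and
 * compare the return value against it; to generate one, pass 0.
 */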
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
                                        unsigned long obj_size,
                                        unsigned int key,
                                        unsigned int chksum)
{
        unsigned int ret = key;
        unsigned long i = 0;
        unsigned char *pos;

        pos = (unsigned char *)obj;
        /* calculate the byte sum of the object */
        for (i = 0; i < obj_size; ++i)
                ret += *(pos + i);
        /* subtract the bytes of the checksum field itself */
        pos = (unsigned char *)&chksum;
        for (i = 0; i < sizeof(chksum); ++i)
                ret -= *(pos + i);
        return ret;
}

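/**
 * amdgpu_virt_init_data_exchange() - set up the PF<->VF exchange region
 * @adev: amdgpu device
 *
 * Locates the pf2vf header inside the firmware-reserved VRAM, validates
 * its size and checksum, and on success initializes the vf2pf region
 * that follows it with the driver version and a fresh checksum.
 */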
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
        uint32_t pf2vf_size = 0;
        uint32_t checksum = 0;
        uint32_t checkval;
        char *str;

        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;

        if (adev->fw_vram_usage.va != NULL) {
                adev->virt.fw_reserve.p_pf2vf =
                        (struct amdgim_pf2vf_info_header *)(
                        adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

                /* the pf2vf message must fit within 4KB */
                if (pf2vf_size > 0 && pf2vf_size < 4096) {
                        checkval = amdgpu_virt_fw_reserve_get_checksum(
                                adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
                                adev->virt.fw_reserve.checksum_key, checksum);
                        if (checkval == checksum) {
                                adev->virt.fw_reserve.p_vf2pf =
                                        ((void *)adev->virt.fw_reserve.p_pf2vf +
                                        pf2vf_size);
                                memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
                                        AMDGPU_FW_VRAM_VF2PF_VER);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
                                        &str);
#ifdef MODULE
                                if (THIS_MODULE->version != NULL)
                                        strcpy(str, THIS_MODULE->version);
                                else
#endif
                                        strcpy(str, "N/A");
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
                                        0);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
                                        amdgpu_virt_fw_reserve_get_checksum(
                                        adev->virt.fw_reserve.p_vf2pf,
                                        pf2vf_size,
                                        adev->virt.fw_reserve.checksum_key, 0));
                        }
                }
        }
}