/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000)
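/*
 * Once the last VCE job has completed, amdgpu_vce_idle_work_handler() waits
 * this long before gating the block's clocks and power again; every new use
 * of the ring pushes the deadline back (see amdgpu_vce_ring_end_use()).
 */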
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"/*(DEBLOBBED)*/"
#define FIRMWARE_KABINI	"/*(DEBLOBBED)*/"
#define FIRMWARE_KAVERI	"/*(DEBLOBBED)*/"
#define FIRMWARE_HAWAII	"/*(DEBLOBBED)*/"
#define FIRMWARE_MULLINS	"/*(DEBLOBBED)*/"
#endif
#define FIRMWARE_TONGA		"/*(DEBLOBBED)*/"
#define FIRMWARE_CARRIZO	"/*(DEBLOBBED)*/"
#define FIRMWARE_FIJI		"/*(DEBLOBBED)*/"
#define FIRMWARE_STONEY		"/*(DEBLOBBED)*/"
#define FIRMWARE_POLARIS10	"/*(DEBLOBBED)*/"
#define FIRMWARE_POLARIS11	"/*(DEBLOBBED)*/"
#ifdef CONFIG_DRM_AMDGPU_CIK
/*(DEBLOBBED)*/
#endif

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware/stack/heap buffer object to allocate
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
		fw_name = FIRMWARE_BONAIRE;
		fw_name = FIRMWARE_KAVERI;
		fw_name = FIRMWARE_KABINI;
		fw_name = FIRMWARE_HAWAII;
		fw_name = FIRMWARE_MULLINS;
#endif
		fw_name = FIRMWARE_TONGA;
		fw_name = FIRMWARE_CARRIZO;
		fw_name = FIRMWARE_FIJI;
		fw_name = FIRMWARE_STONEY;
		fw_name = FIRMWARE_POLARIS10;
		fw_name = FIRMWARE_POLARIS11;
	r = reject_firmware(&adev->vce.fw, fw_name, adev->dev);
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",

	r = amdgpu_ucode_validate(adev->vce.fw);
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
		release_firmware(adev->vce.fw);

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
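	/*
	 * The ucode_version field packs three values: major version in bits
	 * 31:20, minor version in bits 19:8 and a binary ID in bits 7:0.
	 */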
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		 version_major, version_minor, binary_id);

	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
	/* allocate firmware, stack and heap BO */
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);

	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
				  rq, amdgpu_sched_jobs);
		DRM_ERROR("Failed setting up VCE run queue.\n");

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
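	/*
	 * handles[] and filp[] track which DRM file owns each VCE session,
	 * so sessions leaked by userspace can still be destroyed when the
	 * file is closed (see amdgpu_vce_free_handles()).
	 */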
	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);
/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);
/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	/* TODO: suspending running encoding sessions isn't supported */
/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	const struct common_firmware_header *hdr;

	if (adev->vce.vcpu_bo == NULL)

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
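	/*
	 * VRAM contents are not preserved across suspend, so the firmware
	 * image has to be copied back into the pinned VCPU BO before the
	 * block is brought back up.
	 */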
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
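	/*
	 * Only gate clocks and power once no VCE ring has fences outstanding;
	 * otherwise the idle work is simply rescheduled below.
	 */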
	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_vce(adev, false);
	} else {
		amdgpu_asic_set_vce_clocks(adev, 0, 0);
		amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
					     AMD_PG_STATE_GATE);
		amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
					     AMD_CG_STATE_GATE);
	}

	schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	mutex_lock(&adev->vce.idle_mutex);
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
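	/*
	 * If the idle work was not pending, VCE may already have been powered
	 * down, so clocks and power have to be ungated again before any new
	 * job is submitted.
	 */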
	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_vce(adev, true);
	} else {
		amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
					     AMD_CG_STATE_UNGATE);
		amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
					     AMD_PG_STATE_UNGATE);
	}

	mutex_unlock(&adev->vce.idle_mutex);
/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
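	/*
	 * Any session still owned by this file gets an explicit destroy
	 * message submitted on ring 0 before its handle slot is released.
	 */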
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	dummy = ib->gpu_addr + 1024;
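	/*
	 * The IB below is a minimal session-create sequence used for HW
	 * testing: a session command, a create command with a fixed dummy
	 * configuration, and a feedback buffer that points at scratch space
	 * 1024 bytes into the IB's own GPU address range.
	 */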
	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
	job->fence = fence_get(f);

	amdgpu_job_free(job);

	if (fence)
		*fence = fence_get(f);

	amdgpu_job_free(job);
/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit the IB directly to the ring or go through the scheduler
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
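	/*
	 * Two submission paths: with @direct the IB is scheduled straight
	 * onto the ring (used by the IB ring test), otherwise it is queued
	 * through the VCE scheduler entity (used when cleaning up handles
	 * that userspace leaked).
	 */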
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
		job->fence = fence_get(f);
		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	}

	if (fence)
		*fence = fence_get(f);

	amdgpu_job_free(job);
/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);
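	/*
	 * addr now points at the element actually accessed (base address plus
	 * index * size).  That location is validated against the VA mapping
	 * found below, then the original base is rebased onto the BO's
	 * current GPU offset and written back into the command stream.
	 */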
	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));
/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
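			/*
			 * Handles allocated while parsing this command stream
			 * are tracked in a bitmask so they can be released
			 * again if validation fails later on.
			 */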
622 DRM_ERROR("No more free VCE handles!\n");
/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;
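	/*
	 * Walk the IB one command at a time: every command starts with a
	 * length dword followed by a command dword.  Session, create and
	 * destroy commands maintain the per-device handle table, while the
	 * buffer commands get their addresses patched via
	 * amdgpu_vce_cs_reloc().
	 */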
	r = amdgpu_cs_sysvm_access_required(p);

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
			if (session_idx < 0) {
			size = &p->adev->vce.img_size[session_idx];

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;
			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");

	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);
/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
			     unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
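	/* VCE only supports 32 bit fence sequence numbers. */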
	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
	return 4; /* amdgpu_vce_ring_emit_ib */
}

unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
	return 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
}
/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);

	r = amdgpu_ring_alloc(ring, 16);
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",

	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);
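	/*
	 * A single VCE_CMD_END is enough for the firmware to advance the read
	 * pointer; poll until it moves or the timeout expires.
	 */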
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",

		DRM_ERROR("amdgpu: ring %d test failed\n",
/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct fence *fence = NULL;
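	/*
	 * Round-trip a create/destroy message pair through the ring and wait
	 * for the destroy fence to signal.
	 */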
	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);

	r = fence_wait_timeout(fence, false, timeout);
		DRM_ERROR("amdgpu: IB test timed out.\n");
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);