/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v6_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!(adev->flags & AMD_IS_APU) &&
            (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
                return -ENOENT;

        uvd_v6_0_set_ring_funcs(adev);
        uvd_v6_0_set_irq_funcs(adev);

        return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
                             &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

        return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        r = amdgpu_uvd_sw_fini(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t tmp;
        int r;

        r = uvd_v6_0_start(adev);
        if (r)
                goto done;

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }

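        /*
         * One-time semaphore setup using the 10 dwords reserved above:
         * three wait/signal timeout values, a write that clears the timeout
         * status bits, and a write to UVD_SEMA_CNTL (the value 3 presumably
         * enables the semaphore wait and signal units).
         */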
        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;

        uvd_v6_0_stop(adev);
        ring->ready = false;

        return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v6_0_hw_fini(adev);
        if (r)
                return r;

        /* Skip this for APU for now */
        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_uvd_suspend(adev);
                if (r)
                        return r;
        }

        return r;
}

static int uvd_v6_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* Skip this for APU for now */
        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_uvd_resume(adev);
                if (r)
                        return r;
        }
        r = uvd_v6_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        upper_32_bits(adev->uvd.gpu_addr));

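        /*
         * The three VCPU cache windows below carve the UVD BO into the
         * firmware image, the decoder heap and the stack/session data, in
         * that order.  The OFFSETn registers apparently take the offset in
         * 8-byte units, hence the ">> 3".
         */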
        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE +
               (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

        WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
                bool enable)
{
        u32 data, data1;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        if (enable) {
                data |= UVD_CGC_GATE__SYS_MASK |
                                UVD_CGC_GATE__UDEC_MASK |
                                UVD_CGC_GATE__MPEG2_MASK |
                                UVD_CGC_GATE__RBC_MASK |
                                UVD_CGC_GATE__LMI_MC_MASK |
                                UVD_CGC_GATE__IDCT_MASK |
                                UVD_CGC_GATE__MPRD_MASK |
                                UVD_CGC_GATE__MPC_MASK |
                                UVD_CGC_GATE__LBSI_MASK |
                                UVD_CGC_GATE__LRBBM_MASK |
                                UVD_CGC_GATE__UDEC_RE_MASK |
                                UVD_CGC_GATE__UDEC_CM_MASK |
                                UVD_CGC_GATE__UDEC_IT_MASK |
                                UVD_CGC_GATE__UDEC_DB_MASK |
                                UVD_CGC_GATE__UDEC_MP_MASK |
                                UVD_CGC_GATE__WCB_MASK |
                                UVD_CGC_GATE__VCPU_MASK |
                                UVD_CGC_GATE__SCPU_MASK;
                data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK |
                                UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                                UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                                UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                                UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                                UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
        } else {
                data &= ~(UVD_CGC_GATE__SYS_MASK |
                                UVD_CGC_GATE__UDEC_MASK |
                                UVD_CGC_GATE__MPEG2_MASK |
                                UVD_CGC_GATE__RBC_MASK |
                                UVD_CGC_GATE__LMI_MC_MASK |
                                UVD_CGC_GATE__LMI_UMC_MASK |
                                UVD_CGC_GATE__IDCT_MASK |
                                UVD_CGC_GATE__MPRD_MASK |
                                UVD_CGC_GATE__MPC_MASK |
                                UVD_CGC_GATE__LBSI_MASK |
                                UVD_CGC_GATE__LRBBM_MASK |
                                UVD_CGC_GATE__UDEC_RE_MASK |
                                UVD_CGC_GATE__UDEC_CM_MASK |
                                UVD_CGC_GATE__UDEC_IT_MASK |
                                UVD_CGC_GATE__UDEC_DB_MASK |
                                UVD_CGC_GATE__UDEC_MP_MASK |
                                UVD_CGC_GATE__WCB_MASK |
                                UVD_CGC_GATE__VCPU_MASK |
                                UVD_CGC_GATE__SCPU_MASK);
                data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK |
                                UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                                UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                                UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                                UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                                UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
        }
        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v6_0_mc_resume(adev);

        /* disable clock gating */
        WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0);

        /* disable interrupt */
        WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

        /* stall UMC and register bus before resetting VCPU */
        WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET,
                UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
                UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL,
                (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
                UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
                UVD_LMI_CTRL__REQ_MODE_MASK |
                UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

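        /* MPC mux setup; these magic values appear to be carried over
         * unchanged from earlier UVD generations. */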
        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

        /* enable UMC */
        WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);

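        /*
         * Wait for the VCPU to report that it has booted: bit 1 of
         * UVD_STATUS (apparently part of the VCPU_REPORT field, see the
         * status clear further down) is polled every 10 ms, up to 100 times.
         * If it never comes up, toggle the VCPU soft reset and retry the
         * whole poll, up to 10 attempts in total.
         */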
        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
                mdelay(10);
                WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }
        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN,
                (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
                ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

        /* clear the bit 4 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

        /* force RBC into idle state */
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

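        /* base and pointers are programmed, let the RBC fetch from the ring again */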
        WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

        return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address the fence value is written to
 * @seq: sequence number of the fence
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

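        /*
         * Two GPCOM packet groups follow: the first (command 0) hands the
         * sequence number and fence address to the VCPU so it can write the
         * fence, the second (command 2) appears to raise the trap interrupt
         * mentioned in the comment above.
         */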
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
        amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
        amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
        amdgpu_ring_write(ring, vm_id);

        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vm_id, uint64_t pd_addr)
{
        uint32_t reg;

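        /*
         * Everything below goes through the VCPU GPCOM interface: command
         * 0x8 appears to write DATA1 to the register selected by DATA0
         * (byte address, hence the "<< 2"), while command 0xC polls that
         * register, masked by GP_SCRATCH8, until it matches DATA1.  So this
         * sets the page table base, kicks off a VM invalidate and then
         * waits for the invalidate request bit to clear again.
         */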
        if (vm_id < 8)
                reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
        else
                reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, pd_addr >> 12);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 1 << vm_id);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
        amdgpu_ring_write(ring, 1 << vm_id); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

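        /*
         * Command 0xE presumably makes the VCPU poll the fence location at
         * "addr" (masked with GP_SCRATCH8, here 0xffffffff) until it reaches
         * the value in GP_SCRATCH9, i.e. the ring stalls until all
         * previously emitted fences have signalled.
         */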
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xE);
}

static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
        return
                8; /* uvd_v6_0_ring_emit_ib */
}

static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
        return
                2 + /* uvd_v6_0_ring_emit_hdp_flush */
                2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
                10 + /* uvd_v6_0_ring_emit_pipeline_sync */
                14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
}

static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
{
        return
                2 + /* uvd_v6_0_ring_emit_hdp_flush */
                2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
                10 + /* uvd_v6_0_ring_emit_pipeline_sync */
                20 + /* uvd_v6_0_ring_emit_vm_flush */
                14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
}

static bool uvd_v6_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (uvd_v6_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
            REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
            (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

        if (srbm_soft_reset) {
                adev->uvd.srbm_soft_reset = srbm_soft_reset;
                return true;
        } else {
                adev->uvd.srbm_soft_reset = 0;
                return false;
        }
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!adev->uvd.srbm_soft_reset)
                return 0;

        uvd_v6_0_stop(adev);
        return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset;

        if (!adev->uvd.srbm_soft_reset)
                return 0;
        srbm_soft_reset = adev->uvd.srbm_soft_reset;

        if (srbm_soft_reset) {
                u32 tmp;

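                /*
                 * Assert the UVD reset bit, read the register back
                 * (presumably to post the write), hold reset for ~50 us,
                 * then deassert it and give the block time to settle.
                 */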
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }

        return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!adev->uvd.srbm_soft_reset)
                return 0;

        mdelay(5);

        return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        // TODO
        return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.ring);
        return 0;
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, data2, suvd_flags;

        data = RREG32(mmUVD_CGC_CTRL);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

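        /*
         * Software/dynamic clock gating setup: enable DYN_CLOCK_MODE with a
         * gate delay of 1 and an off delay of 4, clear all per-block MODE
         * overrides in UVD_CGC_CTRL and UVD_SUVD_CGC_CTRL, zero UVD_CGC_GATE
         * and set only the SUVD gate bits (the meaning of the individual
         * fields is inferred from the register names).
         */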
        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                     UVD_SUVD_CGC_GATE__SIT_MASK |
                     UVD_SUVD_CGC_GATE__SMP_MASK |
                     UVD_SUVD_CGC_GATE__SCM_MASK |
                     UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
                (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                        UVD_CGC_CTRL__SYS_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MODE_MASK |
                        UVD_CGC_CTRL__MPEG2_MODE_MASK |
                        UVD_CGC_CTRL__REGS_MODE_MASK |
                        UVD_CGC_CTRL__RBC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                        UVD_CGC_CTRL__IDCT_MODE_MASK |
                        UVD_CGC_CTRL__MPRD_MODE_MASK |
                        UVD_CGC_CTRL__MPC_MODE_MASK |
                        UVD_CGC_CTRL__LBSI_MODE_MASK |
                        UVD_CGC_CTRL__LRBBM_MODE_MASK |
                        UVD_CGC_CTRL__WCB_MODE_MASK |
                        UVD_CGC_CTRL__VCPU_MODE_MASK |
                        UVD_CGC_CTRL__JPEG_MODE_MASK |
                        UVD_CGC_CTRL__SCPU_MODE_MASK |
                        UVD_CGC_CTRL__JPEG2_MODE_MASK);
        data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_CTRL, data);
        WREG32(mmUVD_CGC_GATE, 0);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
        WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, cgc_flags, suvd_flags;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);

        cgc_flags = UVD_CGC_GATE__SYS_MASK |
                UVD_CGC_GATE__UDEC_MASK |
                UVD_CGC_GATE__MPEG2_MASK |
                UVD_CGC_GATE__RBC_MASK |
                UVD_CGC_GATE__LMI_MC_MASK |
                UVD_CGC_GATE__IDCT_MASK |
                UVD_CGC_GATE__MPRD_MASK |
                UVD_CGC_GATE__MPC_MASK |
                UVD_CGC_GATE__LBSI_MASK |
                UVD_CGC_GATE__LRBBM_MASK |
                UVD_CGC_GATE__UDEC_RE_MASK |
                UVD_CGC_GATE__UDEC_CM_MASK |
                UVD_CGC_GATE__UDEC_IT_MASK |
                UVD_CGC_GATE__UDEC_DB_MASK |
                UVD_CGC_GATE__UDEC_MP_MASK |
                UVD_CGC_GATE__WCB_MASK |
                UVD_CGC_GATE__VCPU_MASK |
                UVD_CGC_GATE__SCPU_MASK |
                UVD_CGC_GATE__JPEG_MASK |
                UVD_CGC_GATE__JPEG2_MASK;

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= cgc_flags;
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
        u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

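        /* Bypass the DFS dividers for the UVD DCLK/VCLK, presumably so the
         * clocks come straight from the reference source while the block is
         * gated; only called for Fiji and Polaris10 below. */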
        if (enable)
                tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
                        GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
        else
                tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
                         GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

        WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int uvd_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->asic_type == CHIP_FIJI ||
            adev->asic_type == CHIP_POLARIS10)
                uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);

        if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
                return 0;

        if (state == AMD_CG_STATE_GATE) {
                /* disable HW gating and enable SW gating */
                uvd_v6_0_set_sw_clock_gating(adev);
        } else {
                /* wait for STATUS to clear */
                if (uvd_v6_0_wait_for_idle(handle))
                        return -EBUSY;

                /* enable HW gates because UVD is idle */
/*              uvd_v6_0_set_hw_clock_gating(adev); */
        }

        return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
                return 0;

        WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

        if (state == AMD_PG_STATE_GATE) {
                uvd_v6_0_stop(adev);
                return 0;
        } else {
                return uvd_v6_0_start(adev);
        }
}

const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
        .name = "uvd_v6_0",
        .early_init = uvd_v6_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v6_0_sw_init,
        .sw_fini = uvd_v6_0_sw_fini,
        .hw_init = uvd_v6_0_hw_init,
        .hw_fini = uvd_v6_0_hw_fini,
        .suspend = uvd_v6_0_suspend,
        .resume = uvd_v6_0_resume,
        .is_idle = uvd_v6_0_is_idle,
        .wait_for_idle = uvd_v6_0_wait_for_idle,
        .check_soft_reset = uvd_v6_0_check_soft_reset,
        .pre_soft_reset = uvd_v6_0_pre_soft_reset,
        .soft_reset = uvd_v6_0_soft_reset,
        .post_soft_reset = uvd_v6_0_post_soft_reset,
        .set_clockgating_state = uvd_v6_0_set_clockgating_state,
        .set_powergating_state = uvd_v6_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
        .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
        .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .parse_cs = NULL,
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
        .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
        .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
        .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
        if (adev->asic_type >= CHIP_POLARIS10) {
                adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
                DRM_INFO("UVD is enabled in VM mode\n");
        } else {
                adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
                DRM_INFO("UVD is enabled in physical mode\n");
        }
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
        .set = uvd_v6_0_set_interrupt_state,
        .process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.irq.num_types = 1;
        adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}