/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"
#include "r600d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_R600		"/*(DEBLOBBED)*/"
#define FIRMWARE_RS780		"/*(DEBLOBBED)*/"
#define FIRMWARE_RV770		"/*(DEBLOBBED)*/"
#define FIRMWARE_RV710		"/*(DEBLOBBED)*/"
#define FIRMWARE_CYPRESS	"/*(DEBLOBBED)*/"
#define FIRMWARE_SUMO		"/*(DEBLOBBED)*/"
#define FIRMWARE_TAHITI		"/*(DEBLOBBED)*/"
#define FIRMWARE_BONAIRE	"/*(DEBLOBBED)*/"

static void radeon_uvd_idle_work_handler(struct work_struct *work);

int radeon_uvd_init(struct radeon_device *rdev)
{
	unsigned long bo_size;
	const char *fw_name;
	int i, r;

	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV670:
	case CHIP_RV620:
	case CHIP_RV635:
		fw_name = FIRMWARE_R600;
		break;

	case CHIP_RS780:
	case CHIP_RS880:
		fw_name = FIRMWARE_RS780;
		break;

	case CHIP_RV770:
		fw_name = FIRMWARE_RV770;
		break;

	case CHIP_RV710:
	case CHIP_RV730:
	case CHIP_RV740:
		fw_name = FIRMWARE_RV710;
		break;

	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
		fw_name = FIRMWARE_CYPRESS;
		break;

	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_PALM:
	case CHIP_CAYMAN:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		fw_name = FIRMWARE_SUMO;
		break;

	case CHIP_TAHITI:
	case CHIP_VERDE:
	case CHIP_PITCAIRN:
	case CHIP_ARUBA:
	case CHIP_OLAND:
		fw_name = FIRMWARE_TAHITI;
		break;

	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}

	r = reject_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
	if (r) {
		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

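	/*
	 * The VCPU buffer object allocated below holds everything UVD
	 * needs in VRAM: the firmware image (rounded up to a GPU page,
	 * plus 8 bytes), the VCPU stack and heap, and one extra GPU page
	 * that the create/destroy helpers at the end of this file use as
	 * message scratch space.
	 */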
	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
		  RADEON_GPU_PAGE_SIZE;
	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->uvd.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->uvd.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		atomic_set(&rdev->uvd.handles[i], 0);
		rdev->uvd.filp[i] = NULL;
		rdev->uvd.img_size[i] = 0;
	}

	return 0;
}

void radeon_uvd_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (!r) {
		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	}

	radeon_bo_unref(&rdev->uvd.vcpu_bo);

	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);

	release_firmware(rdev->uvd_fw);
}

int radeon_uvd_suspend(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->uvd.vcpu_bo == NULL)
		return 0;

	/* destroy every handle that is still open; decoder sessions do
	   not survive a suspend cycle */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
		if (handle != 0) {
			struct radeon_fence *fence;

			radeon_uvd_note_usage(rdev);

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}

	return 0;
}

int radeon_uvd_resume(struct radeon_device *rdev)
{
	unsigned size;
	void *ptr;

	if (rdev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->size;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->size;

	memset_io((void __iomem *)ptr, 0, size);

	return 0;
}

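/*
 * Resume simply re-uploads the firmware image into the VCPU buffer and
 * zeroes everything behind it (stack, heap and the message page), as the
 * VRAM contents may not survive a suspend cycle. UVD buffers also have
 * to stay inside 256MB segments at the start of the address space; the
 * placement helper below and the relocation checks in the command stream
 * parser enforce that limit.
 */
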
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
				       uint32_t allowed_domains)
{
	int i;

	for (i = 0; i < rbo->placement.num_placement; ++i) {
		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}

	/* If it must be in VRAM it must be in the first segment as well */
	if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
		return;

	/* abort if we already have more than one placement */
	if (rbo->placement.num_placement > 1)
		return;

	/* add another 256MB segment */
	rbo->placements[1] = rbo->placements[0];
	rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
	rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
	rbo->placement.num_placement++;
	rbo->placement.num_busy_placement++;
}

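/*
 * Illustration (hypothetical BO): a buffer allowed in both VRAM and GTT
 * first has its placement clamped to [0, 256MB) above; a second
 * placement covering [256MB, 512MB) is then appended, presumably so
 * allocation can still succeed once the first segment is full.
 */
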
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
		if (handle != 0 && rdev->uvd.filp[i] == filp) {
			struct radeon_fence *fence;

			radeon_uvd_note_usage(rdev);

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}
}

static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);

	unsigned image_size, tmp, min_dpb_size;

	/* luma plane plus half that again for the chroma planes (4:2:0) */
	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */

		/* reference picture buffer */
		min_dpb_size = image_size * 17;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	return 0;
}

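/*
 * Worked example for the H264 case above, using a hypothetical
 * 1920x1088 stream: width_in_mb = 120, height_in_mb = 68, so
 * image_size = ALIGN(1920 * 1088 * 3 / 2, 1024) = 3133440 bytes and
 * min_dpb_size = 17 * 3133440 + 120 * 68 * 17 * 192 + 120 * 68 * 32
 *              = 53268480 + 26634240 + 261120, roughly 76 MiB.
 */
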
static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
				     unsigned stream_type)
{
	switch (stream_type) {
	case 0: /* H264 */
	case 1: /* VC1 */
		/* always supported */
		return 0;

	case 3: /* MPEG2 */
	case 4: /* MPEG4 */
		/* only since UVD 3 */
		if (p->rdev->family >= CHIP_PALM)
			return 0;

	default:
		DRM_ERROR("UVD codec not supported by hardware %d!\n",
			  stream_type);
		return -EINVAL;
	}
}

static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
			     unsigned offset, unsigned buf_sizes[])
{
	int32_t *msg, msg_type, handle;
	unsigned img_size = 0;
	struct fence *f;
	void *ptr;
	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	f = reservation_object_get_excl(bo->tbo.resv);
	if (f) {
		r = radeon_fence_wait((struct radeon_fence *)f, false);
		if (r) {
			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
			return r;
		}
	}

	r = radeon_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		img_size = msg[7] * msg[8];

		r = radeon_uvd_validate_codec(p, msg[4]);
		radeon_bo_kunmap(bo);
		if (r)
			return r;

		/* try to alloc a new handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n", handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
				p->rdev->uvd.filp[i] = p->filp;
				p->rdev->uvd.img_size[i] = img_size;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -EINVAL;

	case 1:
		/* it's a decode msg, validate codec and calc buffer sizes */
		r = radeon_uvd_validate_codec(p, msg[4]);
		if (!r)
			r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
		radeon_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
				if (p->rdev->uvd.filp[i] != p->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -EINVAL;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
		radeon_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}

	BUG();
	return -EINVAL;
}

static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
			       int data0, int data1,
			       unsigned buf_sizes[], bool *has_msg_cmd)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_bo_list *reloc;
	unsigned idx, cmd, offset;
	uint64_t start, end;
	int r;

	relocs_chunk = p->chunk_relocs;
	offset = radeon_get_ib_value(p, data0);
	idx = radeon_get_ib_value(p, data1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = &p->relocs[(idx / 4)];
	start = reloc->gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	p->ib.ptr[data0] = start & 0xFFFFFFFF;
	p->ib.ptr[data1] = start >> 32;

	cmd = radeon_get_ib_value(p, p->idx) >> 1;

	if (cmd < 0x4) {
		if (end <= start) {
			DRM_ERROR("invalid reloc offset %X!\n", offset);
			return -EINVAL;
		}
		if ((end - start) < buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start), buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd != 0x100) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if ((start >> 28) != ((end - 1) >> 28)) {
		DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
			  start, end);
		return -EINVAL;
	}

	/* TODO: is this still necessary on NI+ ? */
	if ((cmd == 0 || cmd == 0x3) &&
	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
			  start, end);
		return -EINVAL;
	}

	if (cmd == 0) {
		if (*has_msg_cmd) {
			DRM_ERROR("More than one message in a UVD-IB!\n");
			return -EINVAL;
		}
		*has_msg_cmd = true;
		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
		if (r)
			return r;
	} else if (!*has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int *data0, int *data1,
			     unsigned buf_sizes[],
			     bool *has_msg_cmd)
{
	int i, r;

	p->idx++;
	for (i = 0; i <= pkt->count; ++i) {
		switch (pkt->reg + i*4) {
		case UVD_GPCOM_VCPU_DATA0:
			*data0 = p->idx;
			break;
		case UVD_GPCOM_VCPU_DATA1:
			*data1 = p->idx;
			break;
		case UVD_GPCOM_VCPU_CMD:
			r = radeon_uvd_cs_reloc(p, *data0, *data1,
						buf_sizes, has_msg_cmd);
			if (r)
				return r;
			break;
		case UVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n",
				  pkt->reg + i*4);
			return -EINVAL;
		}
		p->idx++;
	}

	return 0;
}

int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r, data0 = 0, data1 = 0;

	/* does the IB have a msg command */
	bool has_msg_cmd = false;

	/* minimum buffer sizes */
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	32 * 1024 * 1024,
		[0x00000002]	=	2048 * 1152 * 3,
		[0x00000003]	=	2048,
	};

	if (p->chunk_ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  p->chunk_ib->length_dw);
		return -EINVAL;
	}

	if (p->chunk_relocs == NULL) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}

	p->idx = 0;
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
					      buf_sizes, &has_msg_cmd);
			if (r)
				return r;
			break;
		case RADEON_PACKET_TYPE2:
			p->idx += pkt.count + 2;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
	} while (p->idx < p->chunk_ib->length_dw);

	if (!has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

static int radeon_uvd_send_msg(struct radeon_device *rdev,
			       int ring, uint64_t addr,
			       struct radeon_fence **fence)
{
	struct radeon_ib ib;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
	if (r)
		return r;

	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib.ptr[i] = PACKET2(0);
	ib.length_dw = 16;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);
	return r;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	/* we use the last page of the vcpu bo for the UVD message */
	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
		RADEON_GPU_PAGE_SIZE;

	uint32_t *msg = rdev->uvd.cpu_addr + offs;
	uint64_t addr = rdev->uvd.gpu_addr + offs;

	int r, i;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
	if (r)
		return r;

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);	/* msg type 0: create */
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);	/* width: 1920 */
	msg[8] = cpu_to_le32(0x00000440);	/* height: 1088 */
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	return r;
}

int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	/* we use the last page of the vcpu bo for the UVD message */
	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
		RADEON_GPU_PAGE_SIZE;

	uint32_t *msg = rdev->uvd.cpu_addr + offs;
	uint64_t addr = rdev->uvd.gpu_addr + offs;

	int r, i;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
	if (r)
		return r;

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);	/* msg type 2: destroy */
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	return r;
}

/**
 * radeon_uvd_count_handles - count number of open streams
 *
 * @rdev: radeon_device pointer
 * @sd: number of SD streams
 * @hd: number of HD streams
 *
 * Count the number of open SD/HD streams as a hint for power management
 */
static void radeon_uvd_count_handles(struct radeon_device *rdev,
				     unsigned *sd, unsigned *hd)
{
	unsigned i;

	*sd = 0;
	*hd = 0;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (!atomic_read(&rdev->uvd.handles[i]))
			continue;

		/* anything at or above 720x576 (PAL SD) is counted as HD */
		if (rdev->uvd.img_size[i] >= 720*576)
			++(*hd);
		else
			++(*sd);
	}
}

static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, uvd.idle_work.work);

	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
						 &rdev->pm.dpm.hd);
			radeon_dpm_enable_uvd(rdev, false);
		} else {
			radeon_set_uvd_clocks(rdev, 0, 0);
		}
	} else {
		schedule_delayed_work(&rdev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

void radeon_uvd_note_usage(struct radeon_device *rdev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		unsigned hd = 0, sd = 0;
		radeon_uvd_count_handles(rdev, &sd, &hd);
		if ((rdev->pm.dpm.sd != sd) ||
		    (rdev->pm.dpm.hd != hd)) {
			rdev->pm.dpm.sd = sd;
			rdev->pm.dpm.hd = hd;
			/* disable this for now */
			/*streams_changed = true;*/
		}
	}

	if (set_clocks || streams_changed) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_uvd(rdev, true);
		} else {
			radeon_set_uvd_clocks(rdev, 53300, 40000);
		}
	}
}

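/*
 * The 53300/40000 fallback above is presumably in the 10 kHz units used
 * elsewhere in the radeon clock code, i.e. VCLK 533 MHz and DCLK 400 MHz.
 */
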
static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
					      unsigned target_freq,
					      unsigned pd_min,
					      unsigned pd_even)
{
	unsigned post_div = vco_freq / target_freq;

	/* adjust to post divider minimum value */
	if (post_div < pd_min)
		post_div = pd_min;

	/* we always need a frequency less than or equal to the target */
	if ((vco_freq / post_div) > target_freq)
		post_div += 1;

	/* post dividers above a certain value must be even */
	if (post_div > pd_even && post_div % 2)
		post_div += 1;

	return post_div;
}

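/*
 * Example with hypothetical values: vco_freq = 160000, target_freq =
 * 53300, pd_min = 2, pd_even = 4. The initial divider is 160000/53300
 * = 3, but 160000/3 is above the target, so it is bumped to 4 (which
 * also satisfies the even rule), giving an output frequency of 40000.
 */
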
/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
				  unsigned vclk, unsigned dclk,
				  unsigned vco_min, unsigned vco_max,
				  unsigned fb_factor, unsigned fb_mask,
				  unsigned pd_min, unsigned pd_max,
				  unsigned pd_even,
				  unsigned *optimal_fb_div,
				  unsigned *optimal_vclk_div,
				  unsigned *optimal_dclk_div)
{
	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

	/* start off with something large */
	unsigned optimal_score = ~0;

	/* loop through vco from low to high */
	vco_min = max(max(vco_min, vclk), dclk);
	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
		unsigned vclk_div, dclk_div, score;

		do_div(fb_div, ref_freq);

		/* fb div out of range ? */
		if (fb_div > fb_mask)
			break; /* it can only get worse */

		fb_div &= fb_mask;

		/* calc vclk divider with current vco freq */
		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
							 pd_min, pd_even);
		if (vclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc dclk divider with current vco freq */
		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
							 pd_min, pd_even);
		if (dclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc score with current vco freq: the lower the sum of
		   the frequency shortfalls, the better */
		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

		/* determine if this vco setting is better than current optimal settings */
		if (score < optimal_score) {
			*optimal_fb_div = fb_div;
			*optimal_vclk_div = vclk_div;
			*optimal_dclk_div = dclk_div;
			optimal_score = score;
			if (optimal_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* did we find a valid setup? */
	if (optimal_score == ~0)
		return -EINVAL;

	return 0;
}

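/*
 * Sketch of the search with hypothetical numbers (all in the same
 * units): for vclk = 53300 and dclk = 40000, a candidate vco_freq of
 * 160000 gives vclk_div = 4 and dclk_div = 4, i.e. a score of
 * (53300 - 40000) + (40000 - 40000) = 13300, while vco_freq = 213200
 * gives vclk_div = 4 (an exact match) and dclk_div = 6, scoring only
 * 0 + (40000 - 35533) = 4467, so the higher VCO frequency wins.
 */
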
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				unsigned cg_upll_func_cntl)
{
	unsigned i;

	/* make sure UPLL_CTLREQ is deasserted */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	mdelay(10);

	/* assert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
			break;
		mdelay(10);
	}

	/* deassert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	if (i == 100) {
		DRM_ERROR("Timeout setting UVD clocks!\n");
		return -ETIMEDOUT;
	}

	return 0;
}