1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
7 #include <linux/delay.h>
8 #include <linux/host1x.h>
9 #include <linux/module.h>
11 #include <linux/of_device.h>
12 #include <linux/of_graph.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/reset.h>
17 #include <drm/drm_atomic.h>
18 #include <drm/drm_atomic_helper.h>
19 #include <drm/drm_fourcc.h>
20 #include <drm/drm_probe_helper.h>
/*
 * Pixel formats advertised by shared (window-group) planes.
 * NOTE(review): the format entries are elided in this view — confirm the
 * list against tegra_plane_format() support before relying on it.
 */
28 static const u32 tegra_shared_plane_formats[] = {
/*
 * Framebuffer modifiers for shared planes: linear plus the NVIDIA 16Bx2
 * block-linear layouts (block heights 0..5), each also offered in the
 * dGPU sector-layout variant, terminated by DRM_FORMAT_MOD_INVALID.
 */
52 static const u64 tegra_shared_plane_modifiers[] = {
53 DRM_FORMAT_MOD_LINEAR,
54 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
55 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
56 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
57 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
58 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
59 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
/*
61 * The GPU sector layout is only supported on Tegra194, but these will
62 * be filtered out later on by ->format_mod_supported() on SoCs where
 * it is not supported.
 */
65 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
66 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
67 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
68 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
69 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
70 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
/* sentinel */
72 DRM_FORMAT_MOD_INVALID
75 static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
78 if (offset >= 0x500 && offset <= 0x581) {
79 offset = 0x000 + (offset - 0x500);
80 return plane->offset + offset;
83 if (offset >= 0x700 && offset <= 0x73c) {
84 offset = 0x180 + (offset - 0x700);
85 return plane->offset + offset;
88 if (offset >= 0x800 && offset <= 0x83e) {
89 offset = 0x1c0 + (offset - 0x800);
90 return plane->offset + offset;
93 dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
95 return plane->offset + offset;
98 static inline u32 tegra_plane_readl(struct tegra_plane *plane,
101 return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
104 static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
107 tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
/*
 * Take a reference on a window group; the first user resumes the owning
 * host1x client and deasserts the group's reset. Serialized by
 * wgrp->lock. NOTE(review): usecount increment and error paths are
 * elided in this view — confirm against the full source.
 */
110 static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
114 mutex_lock(&wgrp->lock);
/* only power up on the first user */
116 if (wgrp->usecount == 0) {
117 err = host1x_client_resume(wgrp->parent);
119 dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
123 reset_control_deassert(wgrp->rst);
129 mutex_unlock(&wgrp->lock);
/*
 * Drop a reference on a window group; the last user asserts the group
 * reset and suspends the owning host1x client. Serialized by wgrp->lock.
 */
133 static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
137 mutex_lock(&wgrp->lock);
/* only power down when the last user goes away */
139 if (wgrp->usecount == 1) {
140 err = reset_control_assert(wgrp->rst);
142 pr_err("failed to assert reset for window group %u\n",
146 host1x_client_suspend(wgrp->parent);
150 mutex_unlock(&wgrp->lock);
/* Enable every window group of @hub (see the XXX note below for why). */
153 int tegra_display_hub_prepare(struct tegra_display_hub *hub)
/*
158 * XXX Enabling/disabling windowgroups needs to happen when the owner
159 * display controller is disabled. There's currently no good point at
160 * which this could be executed, so unconditionally enable all window
 * groups here.
 */
163 for (i = 0; i < hub->soc->num_wgrps; i++) {
164 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
166 /* Skip orphaned window group whose parent DC is disabled */
168 tegra_windowgroup_enable(wgrp);
/* Disable every window group of @hub; counterpart of _prepare(). */
174 void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
/*
179 * XXX Remove this once window groups can be more fine-grainedly
180 * enabled and disabled.
 */
182 for (i = 0; i < hub->soc->num_wgrps; i++) {
183 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
185 /* Skip orphaned window group whose parent DC is disabled */
187 tegra_windowgroup_disable(wgrp);
/*
 * Request a state update for this window (COMMON_UPDATE plus the
 * window's own update bit) and poll DC_CMD_STATE_CONTROL for up to one
 * second until the hardware clears the request bits.
 */
191 static void tegra_shared_plane_update(struct tegra_plane *plane)
193 struct tegra_dc *dc = plane->dc;
194 unsigned long timeout;
197 mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
198 tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
200 timeout = jiffies + msecs_to_jiffies(1000);
202 while (time_before(jiffies, timeout)) {
203 value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
/* hardware clears the bits once the update has been latched */
204 if ((value & mask) == 0)
207 usleep_range(100, 400);
/*
 * Request activation of the latched window state (COMMON_ACTREQ plus
 * the window's activation-request bit) and poll for up to one second
 * until the hardware clears the request bits.
 */
211 static void tegra_shared_plane_activate(struct tegra_plane *plane)
213 struct tegra_dc *dc = plane->dc;
214 unsigned long timeout;
217 mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
218 tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
220 timeout = jiffies + msecs_to_jiffies(1000);
222 while (time_before(jiffies, timeout)) {
223 value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
/* hardware clears the bits once activation has completed */
224 if ((value & mask) == 0)
227 usleep_range(100, 400);
232 tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
234 unsigned int offset =
235 tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
237 return tegra_dc_readl(dc, offset) & OWNER_MASK;
/*
 * Check whether head @dc currently owns @plane according to the
 * hardware owner field. Warns when the hardware claims ownership for
 * a head the plane is not attached to in software (the attachment
 * check itself is elided in this view).
 */
240 static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
241 struct tegra_plane *plane)
243 struct device *dev = dc->dev;
245 if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
249 dev_WARN(dev, "head %u owns window %u but is not attached\n",
250 dc->pipe, plane->index);
/*
 * Transfer ownership of @plane's window to head @new, or release the
 * window when @new is NULL. Refuses the handover if the window is
 * still owned by a different head.
 */
256 static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
257 struct tegra_dc *new)
259 unsigned int offset =
260 tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
/* access the register through whichever head is available */
261 struct tegra_dc *old = plane->dc, *dc = new ? new : old;
262 struct device *dev = new ? new->dev : old->dev;
263 unsigned int owner, index = plane->index;
266 value = tegra_dc_readl(dc, offset);
267 owner = value & OWNER_MASK;
/* cannot steal a window that another head still owns */
269 if (new && (owner != OWNER_MASK && owner != new->pipe)) {
270 dev_WARN(dev, "window %u owned by head %u\n", index, owner);
/*
275 * This seems to happen whenever the head has been disabled with one
276 * or more windows being active. This is harmless because we'll just
277 * reassign the window to the new head anyway.
 */
279 if (old && owner == OWNER_MASK)
280 dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
283 value &= ~OWNER_MASK;
286 value |= OWNER(new->pipe);
290 tegra_dc_writel(dc, value, offset);
/*
 * Program the input-scaler coefficient RAM for @plane. The fixed table
 * below holds three banks (one per tap ratio) of 16 phase rows with 4
 * packed coefficient words each; every entry is written as a combined
 * COEFF_INDEX | COEFF_DATA word to the coefficient register.
 */
297 static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
299 static const unsigned int coeffs[192] = {
300 0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
301 0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
302 0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
303 0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
304 0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
305 0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
306 0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
307 0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
308 0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
309 0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
310 0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
311 0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
312 0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
313 0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
314 0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
315 0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
316 0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
317 0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
318 0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
319 0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
320 0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
321 0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
322 0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
323 0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
324 0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
325 0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
326 0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
327 0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
328 0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
329 0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
330 0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
331 0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
332 0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
333 0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
334 0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
335 0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
336 0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
337 0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
338 0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
339 0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
340 0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
341 0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
342 0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
343 0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
344 0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
345 0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
346 0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
347 0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
349 unsigned int ratio, row, column;
/* walk all 3 ratio banks x 16 rows x 4 columns = 192 entries */
351 for (ratio = 0; ratio <= 2; ratio++) {
352 for (row = 0; row <= 15; row++) {
353 for (column = 0; column <= 3; column++) {
354 unsigned int index = (ratio << 6) + (row << 2) + column;
357 value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
358 tegra_plane_writel(plane, value,
359 DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
/*
 * Attach @plane to head @dc: claim window ownership if needed, then
 * program line-buffer mode, fetch metering, latency/watermark control,
 * pipe metering, mempool allocation, thread grouping and the scaler
 * coefficients, finally latching and activating the new state.
 */
365 static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
366 struct tegra_plane *plane)
371 if (!tegra_dc_owns_shared_plane(dc, plane)) {
372 err = tegra_shared_plane_set_owner(plane, dc);
/* line buffer mode: four lines */
377 value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
378 value |= MODE_FOUR_LINES;
379 tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
381 value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
383 tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
385 /* disable watermark */
386 value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
387 value &= ~LATENCY_CTL_MODE_ENABLE;
388 tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
390 value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
391 value |= WATERMARK_MASK;
392 tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
/* pipe meter: zero integer and fractional parts */
395 value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
396 value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
397 tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
399 /* mempool entries */
400 value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
401 value = MEMPOOL_ENTRIES(0x331);
402 tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
/* put this window into its own thread group */
404 value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
405 value &= ~THREAD_NUM_MASK;
406 value |= THREAD_NUM(plane->base.index);
407 value |= THREAD_GROUP_ENABLE;
408 tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
410 tegra_shared_plane_setup_scaler(plane);
/* latch and activate the programming above */
412 tegra_shared_plane_update(plane);
413 tegra_shared_plane_activate(plane);
416 static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
417 struct tegra_plane *plane)
419 tegra_shared_plane_set_owner(plane, NULL);
/*
 * Atomic-check hook for shared planes: validate pixel format, tiling
 * mode, sector layout and UV-plane pitch constraints against the SoC's
 * capabilities, then record the plane in the CRTC state.
 */
422 static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
423 struct drm_atomic_state *state)
425 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
427 struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
428 struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
429 struct tegra_bo_tiling *tiling = &plane_state->tiling;
430 struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
433 /* no need for further checks if the plane is being disabled */
434 if (!new_plane_state->crtc || !new_plane_state->fb)
437 err = tegra_plane_format(new_plane_state->fb->format->format,
438 &plane_state->format,
443 err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
/* reject block-linear buffers on hardware without support */
447 if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
448 !dc->soc->supports_block_linear) {
449 DRM_ERROR("hardware doesn't support block linear mode\n")
453 if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
454 !dc->soc->supports_sector_layout) {
455 DRM_ERROR("hardware doesn't support GPU sector layout\n");
/*
460 * Tegra doesn't support different strides for U and V planes so we
461 * error out if the user tries to display a framebuffer with such a
 * configuration.
 */
464 if (new_plane_state->fb->format->num_planes > 2) {
465 if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
466 DRM_ERROR("unsupported UV-plane configuration\n");
471 /* XXX scaling is not yet supported, add a check here */
473 err = tegra_plane_state_add(&tegra->base, new_plane_state);
/*
 * Atomic-disable hook: resume the head, clear the window's enable bit,
 * release window ownership and suspend the head again.
 */
480 static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
481 struct drm_atomic_state *state)
483 struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
485 struct tegra_plane *p = to_tegra_plane(plane);
490 /* rien ne va plus */
491 if (!old_state || !old_state->crtc)
494 dc = to_tegra_dc(old_state->crtc);
/* registers are only accessible while the head is resumed */
496 err = host1x_client_resume(&dc->client);
498 dev_err(dc->dev, "failed to resume: %d\n", err);
/*
503 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
504 * on planes that are already disabled. Make sure we fallback to the
505 * head for this particular state instead of crashing.
 */
507 if (WARN_ON(p->dc == NULL))
510 value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
511 value &= ~WIN_ENABLE;
512 tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
514 tegra_dc_remove_shared_plane(dc, p);
516 host1x_client_suspend(&dc->client);
/*
 * Compute the fixed-point phase increment for scaling @in source pixels
 * to @out destination pixels, with NFB fractional bits and rounding.
 * NOTE(review): the tmp2 assignment and division step are elided in
 * this view — confirm against the full source.
 */
519 static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
523 tmp = (u64)dfixed_trunc(in);
525 tmp1 = (tmp << NFB) + (tmp2 >> 1);
528 return lower_32_bits(tmp1);
/*
 * Atomic-update hook for shared planes. Resumes the head, assigns the
 * window to it, then programs blending, the input scaler (tap count and
 * H/V phase increments), plane addresses/pitches for up to three YUV
 * planes, color-space parameters, cropping and surface tiling kind,
 * before suspending the head again.
 */
531 static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
532 struct drm_atomic_state *state)
534 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
536 struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
537 struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
538 unsigned int zpos = new_state->normalized_zpos;
539 struct drm_framebuffer *fb = new_state->fb;
540 struct tegra_plane *p = to_tegra_plane(plane);
541 u32 value, min_width, bypass = 0;
542 dma_addr_t base, addr_flag = 0;
543 unsigned int bpc, planes;
547 /* rien ne va plus */
548 if (!new_state->crtc || !new_state->fb)
/* an invisible plane is handled as a disable */
551 if (!new_state->visible) {
552 tegra_shared_plane_atomic_disable(plane, state);
556 err = host1x_client_resume(&dc->client);
558 dev_err(dc->dev, "failed to resume: %d\n", err);
562 yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);
564 tegra_dc_assign_shared_plane(dc, p);
566 tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
/* blending setup (match and no-match cases use the same factors) */
569 value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
570 BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
571 BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
572 tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);
574 value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
575 BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
576 BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
577 tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);
/* higher zpos means closer to the viewer, hence smaller layer depth */
579 value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
580 tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
/* choose the scaler tap count based on the narrowest dimension */
583 min_width = min(new_state->src_w >> 16, new_state->crtc_w);
585 value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);
587 if (min_width < MAX_PIXELS_5TAP444(value)) {
588 value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
590 value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);
592 if (min_width < MAX_PIXELS_2TAP444(value))
593 value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
595 dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
598 value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
599 tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
/* horizontal scaling: program phase increment, else bypass */
601 if (new_state->src_w != new_state->crtc_w << 16) {
602 fixed20_12 width = dfixed_init(new_state->src_w >> 16);
603 u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
604 u32 init = (1 << (NFB - 1)) + (incr >> 1);
606 tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
607 tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
609 bypass |= INPUT_SCALER_HBYPASS;
/* vertical scaling: program phase increment, else bypass */
612 if (new_state->src_h != new_state->crtc_h << 16) {
613 fixed20_12 height = dfixed_init(new_state->src_h >> 16);
614 u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
615 u32 init = (1 << (NFB - 1)) + (incr >> 1);
617 tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
618 tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
620 bypass |= INPUT_SCALER_VBYPASS;
623 tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
625 /* disable compression */
626 tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
628 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/*
630 * Physical address bit 39 in Tegra194 is used as a switch for special
631 * logic that swizzles the memory using either the legacy Tegra or the
632 * dGPU sector layout.
 */
634 if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
635 addr_flag = BIT_ULL(39);
638 base = tegra_plane_state->iova[0] + fb->offsets[0];
641 tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
642 tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
644 value = V_POSITION(new_state->crtc_y) |
645 H_POSITION(new_state->crtc_x);
646 tegra_plane_writel(p, value, DC_WIN_POSITION);
648 value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
649 tegra_plane_writel(p, value, DC_WIN_SIZE);
651 value = WIN_ENABLE | COLOR_EXPAND;
652 tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
654 value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
655 tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
657 tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
658 tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);
660 value = PITCH(fb->pitches[0]);
661 tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
/* program U/V plane addresses for multi-planar YUV, clear otherwise */
663 if (yuv && planes > 1) {
664 base = tegra_plane_state->iova[1] + fb->offsets[1];
667 tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
668 tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);
671 base = tegra_plane_state->iova[2] + fb->offsets[2];
674 tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
675 tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
678 value = PITCH_U(fb->pitches[1]);
681 value |= PITCH_V(fb->pitches[2]);
683 tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
685 tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
686 tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
687 tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
688 tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
689 tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
692 value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;
/* degamma selection depends on bits-per-component (branches elided) */
696 value |= DEGAMMA_YUV8_10;
698 value |= DEGAMMA_YUV12;
700 /* XXX parameterize */
701 value |= COLOR_SPACE_YUV_2020;
703 if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
704 value |= DEGAMMA_SRGB;
707 tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
709 value = OFFSET_X(new_state->src_y >> 16) |
710 OFFSET_Y(new_state->src_x >> 16);
711 tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
713 if (dc->soc->supports_block_linear) {
714 unsigned long height = tegra_plane_state->tiling.value;
717 switch (tegra_plane_state->tiling.mode) {
718 case TEGRA_BO_TILING_MODE_PITCH:
719 value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
720 DC_WINBUF_SURFACE_KIND_PITCH;
723 /* XXX not supported on Tegra186 and later */
724 case TEGRA_BO_TILING_MODE_TILED:
725 value = DC_WINBUF_SURFACE_KIND_TILED;
728 case TEGRA_BO_TILING_MODE_BLOCK:
729 value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
730 DC_WINBUF_SURFACE_KIND_BLOCK;
734 tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
737 /* disable gamut CSC */
738 value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
739 value &= ~CONTROL_CSC_ENABLE;
740 tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
742 host1x_client_suspend(&dc->client);
/* DRM plane helper vtable for shared (window-group) planes. */
745 static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
746 .prepare_fb = tegra_plane_prepare_fb,
747 .cleanup_fb = tegra_plane_cleanup_fb,
748 .atomic_check = tegra_shared_plane_atomic_check,
749 .atomic_update = tegra_shared_plane_atomic_update,
750 .atomic_disable = tegra_shared_plane_atomic_disable,
/*
 * Allocate and register a shared overlay plane bound to window group
 * @wgrp and window @index. The plane can be assigned to any CRTC.
 * Returns the DRM plane or an ERR_PTR on failure.
 */
753 struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
758 enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
759 struct tegra_drm *tegra = drm->dev_private;
760 struct tegra_display_hub *hub = tegra->hub;
761 struct tegra_shared_plane *plane;
762 unsigned int possible_crtcs;
763 unsigned int num_formats;
764 const u64 *modifiers;
769 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
771 return ERR_PTR(-ENOMEM);
/* per-window register aperture: 0x0a00 + 0x0300 * window index */
773 plane->base.offset = 0x0a00 + 0x0300 * index;
774 plane->base.index = index;
776 plane->wgrp = &hub->wgrps[wgrp];
777 plane->wgrp->parent = &dc->client;
779 p = &plane->base.base;
781 /* planes can be assigned to arbitrary CRTCs */
782 possible_crtcs = BIT(tegra->num_crtcs) - 1;
784 num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
785 formats = tegra_shared_plane_formats;
786 modifiers = tegra_shared_plane_modifiers;
788 err = drm_universal_plane_init(drm, p, possible_crtcs,
789 &tegra_plane_funcs, formats,
790 num_formats, modifiers, type, NULL);
796 drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
/* zpos range 0..255, default 0 */
797 drm_plane_create_zpos_property(p, 0, 0, 255);
/*
 * Duplicate the hub's private atomic state by shallow-copying the
 * current state and re-initializing the private-object base.
 */
802 static struct drm_private_state *
803 tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
805 struct tegra_display_hub_state *state;
807 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
811 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
/* Free a duplicated hub private state (kfree elided in this view). */
816 static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
817 struct drm_private_state *state)
819 struct tegra_display_hub_state *hub_state =
820 to_tegra_display_hub_state(state);
/* Private-object state vtable for the display hub. */
825 static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
826 .atomic_duplicate_state = tegra_display_hub_duplicate_state,
827 .atomic_destroy_state = tegra_display_hub_destroy_state,
/*
 * Fetch (or create) the hub's private state from the atomic @state.
 * Returns an ERR_PTR when drm_atomic_get_private_obj_state() fails.
 */
830 static struct tegra_display_hub_state *
831 tegra_display_hub_get_state(struct tegra_display_hub *hub,
832 struct drm_atomic_state *state)
834 struct drm_private_state *priv;
836 priv = drm_atomic_get_private_obj_state(state, &hub->base);
838 return ERR_CAST(priv);
840 return to_tegra_display_hub_state(priv);
/*
 * Atomic-check hook for the hub: select, among all CRTCs in @state,
 * the display clock with the highest pixel rate to feed the hub clock.
 */
843 int tegra_display_hub_atomic_check(struct drm_device *drm,
844 struct drm_atomic_state *state)
846 struct tegra_drm *tegra = drm->dev_private;
847 struct tegra_display_hub_state *hub_state;
848 struct drm_crtc_state *old, *new;
849 struct drm_crtc *crtc;
855 hub_state = tegra_display_hub_get_state(tegra->hub, state);
856 if (IS_ERR(hub_state))
857 return PTR_ERR(hub_state);
/*
860 * The display hub display clock needs to be fed by the display clock
861 * with the highest frequency to ensure proper functioning of all the
 * heads.
 *
864 * Note that this isn't used before Tegra186, but it doesn't hurt and
865 * conditionalizing it would make the code less clean.
 */
867 for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
868 struct tegra_dc_state *dc = to_dc_state(new);
/* track the fastest pixel clock seen so far */
871 if (!hub_state->clk || dc->pclk > hub_state->rate) {
872 hub_state->dc = to_tegra_dc(dc->base.crtc);
873 hub_state->clk = hub_state->dc->clk;
874 hub_state->rate = dc->pclk;
/*
 * Flush common hub state through head @dc: clear the latency event,
 * program fetch-meter slots, then issue COMMON update/activation
 * requests (with read-backs to flush the writes).
 */
882 static void tegra_display_hub_update(struct tegra_dc *dc)
887 err = host1x_client_resume(&dc->client);
889 dev_err(dc->dev, "failed to resume: %d\n", err);
893 value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
894 value &= ~LATENCY_EVENT;
895 tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);
897 value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
898 value = CURS_SLOTS(1) | WGRP_SLOTS(1);
899 tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
/* latch and activate; the reads flush the posted writes */
901 tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
902 tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
903 tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
904 tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
906 host1x_client_suspend(&dc->client);
/*
 * Atomic-commit hook for the hub: reparent and retune the hub display
 * clock to the rate chosen during atomic check, then flush the common
 * hub state through the selected head.
 */
909 void tegra_display_hub_atomic_commit(struct drm_device *drm,
910 struct drm_atomic_state *state)
912 struct tegra_drm *tegra = drm->dev_private;
913 struct tegra_display_hub *hub = tegra->hub;
914 struct tegra_display_hub_state *hub_state;
915 struct device *dev = hub->client.dev;
918 hub_state = to_tegra_display_hub_state(hub->base.state);
920 if (hub_state->clk) {
921 err = clk_set_rate(hub_state->clk, hub_state->rate);
923 dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
924 hub_state->clk, hub_state->rate);
926 err = clk_set_parent(hub->clk_disp, hub_state->clk);
928 dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
929 hub->clk_disp, hub_state->clk, err);
933 tegra_display_hub_update(hub_state->dc);
/*
 * host1x client init: allocate the hub's initial private state and
 * register it as a DRM atomic private object.
 */
936 static int tegra_display_hub_init(struct host1x_client *client)
938 struct tegra_display_hub *hub = to_tegra_display_hub(client);
939 struct drm_device *drm = dev_get_drvdata(client->host);
940 struct tegra_drm *tegra = drm->dev_private;
941 struct tegra_display_hub_state *state;
943 state = kzalloc(sizeof(*state), GFP_KERNEL);
947 drm_atomic_private_obj_init(drm, &hub->base, &state->base,
948 &tegra_display_hub_state_funcs);
/* host1x client exit: tear down the hub's atomic private object. */
955 static int tegra_display_hub_exit(struct host1x_client *client)
957 struct drm_device *drm = dev_get_drvdata(client->host);
958 struct tegra_drm *tegra = drm->dev_private;
960 drm_atomic_private_obj_fini(&tegra->hub->base);
/*
 * Runtime-suspend: assert the hub reset, disable the per-head clocks
 * (i starts at num_heads for the countdown loop) and the dsc/hub/disp
 * clocks, then drop the runtime PM reference.
 */
966 static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
968 struct tegra_display_hub *hub = to_tegra_display_hub(client);
969 struct device *dev = client->dev;
970 unsigned int i = hub->num_heads;
973 err = reset_control_assert(hub->rst);
978 clk_disable_unprepare(hub->clk_heads[i]);
980 clk_disable_unprepare(hub->clk_hub);
981 clk_disable_unprepare(hub->clk_dsc);
982 clk_disable_unprepare(hub->clk_disp);
984 pm_runtime_put_sync(dev);
/*
 * Runtime-resume: take a runtime PM reference, enable disp/dsc/hub and
 * all per-head clocks in order, then release the hub reset. The labels
 * for the unwind path are elided in this view; clocks are disabled in
 * reverse order on failure.
 */
989 static int tegra_display_hub_runtime_resume(struct host1x_client *client)
991 struct tegra_display_hub *hub = to_tegra_display_hub(client);
992 struct device *dev = client->dev;
996 err = pm_runtime_resume_and_get(dev);
998 dev_err(dev, "failed to get runtime PM: %d\n", err);
1002 err = clk_prepare_enable(hub->clk_disp);
1006 err = clk_prepare_enable(hub->clk_dsc);
1010 err = clk_prepare_enable(hub->clk_hub);
1014 for (i = 0; i < hub->num_heads; i++) {
1015 err = clk_prepare_enable(hub->clk_heads[i]);
1020 err = reset_control_deassert(hub->rst);
/* error unwind: disable everything enabled above, in reverse */
1028 clk_disable_unprepare(hub->clk_heads[i]);
1030 clk_disable_unprepare(hub->clk_hub);
1032 clk_disable_unprepare(hub->clk_dsc);
1034 clk_disable_unprepare(hub->clk_disp);
1036 pm_runtime_put_sync(dev);
/* host1x client vtable for the display hub. */
1040 static const struct host1x_client_ops tegra_display_hub_ops = {
1041 .init = tegra_display_hub_init,
1042 .exit = tegra_display_hub_exit,
1043 .suspend = tegra_display_hub_runtime_suspend,
1044 .resume = tegra_display_hub_runtime_resume,
/*
 * Platform probe: set the DMA mask, acquire disp/dsc/hub clocks and the
 * misc reset, set up one windowgroup (lock + reset, held asserted)
 * per SoC-defined group, collect per-head "dc" clocks from the child DT
 * nodes, then register as a host1x client and populate child devices.
 */
1047 static int tegra_display_hub_probe(struct platform_device *pdev)
1049 u64 dma_mask = dma_get_mask(pdev->dev.parent);
1050 struct device_node *child = NULL;
1051 struct tegra_display_hub *hub;
/* inherit the parent (host1x) bus DMA mask */
1056 err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
1058 dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1062 hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
1066 hub->soc = of_device_get_match_data(&pdev->dev);
1068 hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
1069 if (IS_ERR(hub->clk_disp)) {
1070 err = PTR_ERR(hub->clk_disp);
/* the DSC clock only exists on SoCs that support DSC */
1074 if (hub->soc->supports_dsc) {
1075 hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
1076 if (IS_ERR(hub->clk_dsc)) {
1077 err = PTR_ERR(hub->clk_dsc);
1082 hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
1083 if (IS_ERR(hub->clk_hub)) {
1084 err = PTR_ERR(hub->clk_hub);
1088 hub->rst = devm_reset_control_get(&pdev->dev, "misc");
1089 if (IS_ERR(hub->rst)) {
1090 err = PTR_ERR(hub->rst);
1094 hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
1095 sizeof(*hub->wgrps), GFP_KERNEL);
/* one reset line per window group, named "wgrp0", "wgrp1", ... */
1099 for (i = 0; i < hub->soc->num_wgrps; i++) {
1100 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1103 snprintf(id, sizeof(id), "wgrp%u", i);
1104 mutex_init(&wgrp->lock);
1108 wgrp->rst = devm_reset_control_get(&pdev->dev, id);
1109 if (IS_ERR(wgrp->rst))
1110 return PTR_ERR(wgrp->rst);
1112 err = reset_control_assert(wgrp->rst);
1117 hub->num_heads = of_get_child_count(pdev->dev.of_node);
1119 hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
1121 if (!hub->clk_heads)
/* each child DT node (head) provides its own "dc" clock */
1124 for (i = 0; i < hub->num_heads; i++) {
1125 child = of_get_next_child(pdev->dev.of_node, child);
1127 dev_err(&pdev->dev, "failed to find node for head %u\n",
1132 clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
1134 dev_err(&pdev->dev, "failed to get clock for head %u\n",
1137 return PTR_ERR(clk);
1140 hub->clk_heads[i] = clk;
1145 /* XXX: enable clock across reset? */
1146 err = reset_control_assert(hub->rst);
1150 platform_set_drvdata(pdev, hub);
1151 pm_runtime_enable(&pdev->dev);
1153 INIT_LIST_HEAD(&hub->client.list);
1154 hub->client.ops = &tegra_display_hub_ops;
1155 hub->client.dev = &pdev->dev;
1157 err = host1x_client_register(&hub->client);
1159 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1162 err = devm_of_platform_populate(&pdev->dev);
/* error unwind: unregister the client and disable runtime PM */
1169 host1x_client_unregister(&hub->client);
1170 pm_runtime_disable(&pdev->dev);
/*
 * Platform remove: unregister the host1x client, destroy the per-group
 * locks and disable runtime PM.
 */
1174 static int tegra_display_hub_remove(struct platform_device *pdev)
1176 struct tegra_display_hub *hub = platform_get_drvdata(pdev);
1180 err = host1x_client_unregister(&hub->client);
1182 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1186 for (i = 0; i < hub->soc->num_wgrps; i++) {
1187 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1189 mutex_destroy(&wgrp->lock);
1192 pm_runtime_disable(&pdev->dev);
/* Per-SoC hub capabilities (window-group count elided in this view). */
1197 static const struct tegra_display_hub_soc tegra186_display_hub = {
1199 .supports_dsc = true,
1202 static const struct tegra_display_hub_soc tegra194_display_hub = {
1204 .supports_dsc = false,
/* DT match table binding compatible strings to per-SoC data. */
1207 static const struct of_device_id tegra_display_hub_of_match[] = {
1209 .compatible = "nvidia,tegra194-display",
1210 .data = &tegra194_display_hub
1212 .compatible = "nvidia,tegra186-display",
1213 .data = &tegra186_display_hub
1218 MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
/* Platform driver for the Tegra display hub. */
1220 struct platform_driver tegra_display_hub_driver = {
1222 .name = "tegra-display-hub",
1223 .of_match_table = tegra_display_hub_of_match,
1225 .probe = tegra_display_hub_probe,
1226 .remove = tegra_display_hub_remove,