1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cadence MHDP8546 DP bridge driver.
4  *
5  * Copyright (C) 2020 Cadence Design Systems, Inc.
6  *
7  * Authors: Quentin Schulz <quentin.schulz@free-electrons.com>
8  *          Swapnil Jakhade <sjakhade@cadence.com>
9  *          Yuti Amonkar <yamonkar@cadence.com>
10  *          Tomi Valkeinen <tomi.valkeinen@ti.com>
11  *          Jyri Sarha <jsarha@ti.com>
12  *
13  * TODO:
14  *     - Implement optimized mailbox communication using mailbox interrupts
15  *     - Add support for power management
16  *     - Add support for features like audio, MST and fast link training
17  *     - Implement request_fw_cancel to handle HW_STATE
18  *     - Fix the asynchronous firmware loading implementation
19  *     - Add DRM helper function for cdns_mhdp_lower_link_rate
20  */
21
22 #include <linux/clk.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/firmware.h>
26 #include <linux/io.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/module.h>
30 #include <linux/of.h>
31 #include <linux/of_device.h>
32 #include <linux/phy/phy.h>
33 #include <linux/phy/phy-dp.h>
34 #include <linux/platform_device.h>
35 #include <linux/slab.h>
36 #include <linux/wait.h>
37
38 #include <drm/drm_atomic.h>
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_atomic_state_helper.h>
41 #include <drm/drm_bridge.h>
42 #include <drm/drm_connector.h>
43 #include <drm/drm_crtc_helper.h>
44 #include <drm/drm_dp_helper.h>
45 #include <drm/drm_modeset_helper_vtables.h>
46 #include <drm/drm_print.h>
47 #include <drm/drm_probe_helper.h>
48
49 #include <asm/unaligned.h>
50
51 #include "cdns-mhdp8546-core.h"
52
53 #include "cdns-mhdp8546-j721e.h"
54
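/*
 * Read a single byte from the firmware mailbox: poll CDNS_MAILBOX_EMPTY
 * until the RX FIFO has data, then return the low byte of
 * CDNS_MAILBOX_RX_DATA. Returns a negative errno on timeout. The caller
 * must hold mbox_mutex.
 */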
55 static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
56 {
57         int ret, empty;
58
59         WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
60
61         ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
62                                  empty, !empty, MAILBOX_RETRY_US,
63                                  MAILBOX_TIMEOUT_US);
64         if (ret < 0)
65                 return ret;
66
67         return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
68 }
69
70 static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
71 {
72         int ret, full;
73
74         WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
75
76         ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
77                                  full, !full, MAILBOX_RETRY_US,
78                                  MAILBOX_TIMEOUT_US);
79         if (ret < 0)
80                 return ret;
81
82         writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
83
84         return 0;
85 }
86
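/*
 * Read and validate the 4-byte response header (opcode, module id and a
 * big-endian 16-bit payload size). If the header does not match the
 * expected opcode/module/size, drain the payload from the mailbox and
 * return -EINVAL.
 */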
87 static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
88                                          u8 module_id, u8 opcode,
89                                          u16 req_size)
90 {
91         u32 mbox_size, i;
92         u8 header[4];
93         int ret;
94
95         /* read the header of the message */
96         for (i = 0; i < sizeof(header); i++) {
97                 ret = cdns_mhdp_mailbox_read(mhdp);
98                 if (ret < 0)
99                         return ret;
100
101                 header[i] = ret;
102         }
103
104         mbox_size = get_unaligned_be16(header + 2);
105
106         if (opcode != header[0] || module_id != header[1] ||
107             req_size != mbox_size) {
108                 /*
109                  * If the message in mailbox is not what we want, we need to
110                  * clear the mailbox by reading its contents.
111                  */
112                 for (i = 0; i < mbox_size; i++)
113                         if (cdns_mhdp_mailbox_read(mhdp) < 0)
114                                 break;
115
116                 return -EINVAL;
117         }
118
119         return 0;
120 }
121
122 static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
123                                        u8 *buff, u16 buff_size)
124 {
125         u32 i;
126         int ret;
127
128         for (i = 0; i < buff_size; i++) {
129                 ret = cdns_mhdp_mailbox_read(mhdp);
130                 if (ret < 0)
131                         return ret;
132
133                 buff[i] = ret;
134         }
135
136         return 0;
137 }
138
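/*
 * Send a request to the firmware: a 4-byte header (opcode, module id,
 * big-endian 16-bit payload size) followed by the payload, written one
 * byte at a time through the TX mailbox register.
 */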
139 static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
140                                   u8 opcode, u16 size, u8 *message)
141 {
142         u8 header[4];
143         int ret, i;
144
145         header[0] = opcode;
146         header[1] = module_id;
147         put_unaligned_be16(size, header + 2);
148
149         for (i = 0; i < sizeof(header); i++) {
150                 ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
151                 if (ret)
152                         return ret;
153         }
154
155         for (i = 0; i < size; i++) {
156                 ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
157                 if (ret)
158                         return ret;
159         }
160
161         return 0;
162 }
163
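/*
 * Read a 32-bit controller register through the firmware mailbox
 * (GENERAL_REGISTER_READ). The response echoes the 4-byte address
 * followed by the 4-byte value; the echoed address is checked against
 * the request.
 */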
164 static
165 int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
166 {
167         u8 msg[4], resp[8];
168         int ret;
169
170         put_unaligned_be32(addr, msg);
171
172         mutex_lock(&mhdp->mbox_mutex);
173
174         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
175                                      GENERAL_REGISTER_READ,
176                                      sizeof(msg), msg);
177         if (ret)
178                 goto out;
179
180         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
181                                             GENERAL_REGISTER_READ,
182                                             sizeof(resp));
183         if (ret)
184                 goto out;
185
186         ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
187         if (ret)
188                 goto out;
189
190         /* Returned address value should be the same as requested */
191         if (memcmp(msg, resp, sizeof(msg))) {
192                 ret = -EINVAL;
193                 goto out;
194         }
195
196         *value = get_unaligned_be32(resp + 4);
197
198 out:
199         mutex_unlock(&mhdp->mbox_mutex);
200         if (ret) {
201                 dev_err(mhdp->dev, "Failed to read register\n");
202                 *value = 0;
203         }
204
205         return ret;
206 }
207
208 static
209 int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
210 {
211         u8 msg[6];
212         int ret;
213
214         put_unaligned_be16(addr, msg);
215         put_unaligned_be32(val, msg + 2);
216
217         mutex_lock(&mhdp->mbox_mutex);
218
219         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
220                                      DPTX_WRITE_REGISTER, sizeof(msg), msg);
221
222         mutex_unlock(&mhdp->mbox_mutex);
223
224         return ret;
225 }
226
227 static
228 int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
229                             u8 start_bit, u8 bits_no, u32 val)
230 {
231         u8 field[8];
232         int ret;
233
234         put_unaligned_be16(addr, field);
235         field[2] = start_bit;
236         field[3] = bits_no;
237         put_unaligned_be32(val, field + 4);
238
239         mutex_lock(&mhdp->mbox_mutex);
240
241         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
242                                      DPTX_WRITE_FIELD, sizeof(field), field);
243
244         mutex_unlock(&mhdp->mbox_mutex);
245
246         return ret;
247 }
248
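/*
 * Read len bytes of sink DPCD starting at addr via DPTX_READ_DPCD. The
 * response carries a 5-byte header (the length/address echo), which is
 * consumed before the DPCD data itself.
 */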
249 static
250 int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
251                         u32 addr, u8 *data, u16 len)
252 {
253         u8 msg[5], reg[5];
254         int ret;
255
256         put_unaligned_be16(len, msg);
257         put_unaligned_be24(addr, msg + 2);
258
259         mutex_lock(&mhdp->mbox_mutex);
260
261         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
262                                      DPTX_READ_DPCD, sizeof(msg), msg);
263         if (ret)
264                 goto out;
265
266         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
267                                             DPTX_READ_DPCD,
268                                             sizeof(reg) + len);
269         if (ret)
270                 goto out;
271
272         ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
273         if (ret)
274                 goto out;
275
276         ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len);
277
278 out:
279         mutex_unlock(&mhdp->mbox_mutex);
280
281         return ret;
282 }
283
284 static
285 int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
286 {
287         u8 msg[6], reg[5];
288         int ret;
289
290         put_unaligned_be16(1, msg);
291         put_unaligned_be24(addr, msg + 2);
292         msg[5] = value;
293
294         mutex_lock(&mhdp->mbox_mutex);
295
296         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
297                                      DPTX_WRITE_DPCD, sizeof(msg), msg);
298         if (ret)
299                 goto out;
300
301         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
302                                             DPTX_WRITE_DPCD, sizeof(reg));
303         if (ret)
304                 goto out;
305
306         ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
307         if (ret)
308                 goto out;
309
310         if (addr != get_unaligned_be24(reg + 2))
311                 ret = -EINVAL;
312
313 out:
314         mutex_unlock(&mhdp->mbox_mutex);
315
316         if (ret)
317                 dev_err(mhdp->dev, "dpcd write failed: %d\n", ret);
318         return ret;
319 }
320
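/*
 * Switch the firmware between FW_ACTIVE and FW_STANDBY. The request is a
 * raw GENERAL_MAIN_CONTROL message written byte by byte, and the reply of
 * the same size (the firmware state) is read back into the same buffer.
 */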
321 static
322 int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
323 {
324         u8 msg[5];
325         int ret, i;
326
327         msg[0] = GENERAL_MAIN_CONTROL;
328         msg[1] = MB_MODULE_ID_GENERAL;
329         msg[2] = 0;
330         msg[3] = 1;
331         msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
332
333         mutex_lock(&mhdp->mbox_mutex);
334
335         for (i = 0; i < sizeof(msg); i++) {
336                 ret = cdns_mhdp_mailbox_write(mhdp, msg[i]);
337                 if (ret)
338                         goto out;
339         }
340
341         /* read the firmware state */
342         ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg));
343         if (ret)
344                 goto out;
345
346         ret = 0;
347
348 out:
349         mutex_unlock(&mhdp->mbox_mutex);
350
351         if (ret < 0)
352                 dev_err(mhdp->dev, "set firmware active failed\n");
353         return ret;
354 }
355
356 static
357 int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
358 {
359         u8 status;
360         int ret;
361
362         mutex_lock(&mhdp->mbox_mutex);
363
364         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
365                                      DPTX_HPD_STATE, 0, NULL);
366         if (ret)
367                 goto err_get_hpd;
368
369         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
370                                             DPTX_HPD_STATE,
371                                             sizeof(status));
372         if (ret)
373                 goto err_get_hpd;
374
375         ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status));
376         if (ret)
377                 goto err_get_hpd;
378
379         mutex_unlock(&mhdp->mbox_mutex);
380
381         dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__,
382                 status ? "" : "un");
383
384         return status;
385
386 err_get_hpd:
387         mutex_unlock(&mhdp->mbox_mutex);
388
389         return ret;
390 }
391
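/*
 * drm_do_get_edid() callback: ask the firmware for one EDID block
 * (segment = block / 2, block within segment = block % 2) and retry up to
 * four times until the response reports the expected length and segment.
 */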
392 static
393 int cdns_mhdp_get_edid_block(void *data, u8 *edid,
394                              unsigned int block, size_t length)
395 {
396         struct cdns_mhdp_device *mhdp = data;
397         u8 msg[2], reg[2], i;
398         int ret;
399
400         mutex_lock(&mhdp->mbox_mutex);
401
402         for (i = 0; i < 4; i++) {
403                 msg[0] = block / 2;
404                 msg[1] = block % 2;
405
406                 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
407                                              DPTX_GET_EDID, sizeof(msg), msg);
408                 if (ret)
409                         continue;
410
411                 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
412                                                     DPTX_GET_EDID,
413                                                     sizeof(reg) + length);
414                 if (ret)
415                         continue;
416
417                 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
418                 if (ret)
419                         continue;
420
421                 ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length);
422                 if (ret)
423                         continue;
424
425                 if (reg[0] == length && reg[1] == block / 2)
426                         break;
427         }
428
429         mutex_unlock(&mhdp->mbox_mutex);
430
431         if (ret)
432                 dev_err(mhdp->dev, "get block[%d] edid failed: %d\n",
433                         block, ret);
434
435         return ret;
436 }
437
438 static
439 int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp)
440 {
441         u8 event = 0;
442         int ret;
443
444         mutex_lock(&mhdp->mbox_mutex);
445
446         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
447                                      DPTX_READ_EVENT, 0, NULL);
448         if (ret)
449                 goto out;
450
451         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
452                                             DPTX_READ_EVENT, sizeof(event));
453         if (ret < 0)
454                 goto out;
455
456         ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event));
457 out:
458         mutex_unlock(&mhdp->mbox_mutex);
459
460         if (ret < 0)
461                 return ret;
462
463         dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__,
464                 (event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "",
465                 (event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "",
466                 (event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "",
467                 (event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : "");
468
469         return event;
470 }
471
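/*
 * Hand the per-lane voltage swing / pre-emphasis settings to the firmware
 * (DPTX_ADJUST_LT) together with the training delay. The firmware replies
 * in the DPTX_READ_DPCD format with the link status registers starting at
 * DP_LANE0_1_STATUS, which are copied into link_status.
 */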
472 static
473 int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
474                         unsigned int udelay, const u8 *lanes_data,
475                         u8 link_status[DP_LINK_STATUS_SIZE])
476 {
477         u8 payload[7];
478         u8 hdr[5]; /* For DPCD read response header */
479         u32 addr;
480         int ret;
481
482         if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
483                 dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes);
484                 ret = -EINVAL;
485                 goto out;
486         }
487
488         payload[0] = nlanes;
489         put_unaligned_be16(udelay, payload + 1);
490         memcpy(payload + 3, lanes_data, nlanes);
491
492         mutex_lock(&mhdp->mbox_mutex);
493
494         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
495                                      DPTX_ADJUST_LT,
496                                      sizeof(payload), payload);
497         if (ret)
498                 goto out;
499
500         /* Yes, read the DPCD read command response */
501         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
502                                             DPTX_READ_DPCD,
503                                             sizeof(hdr) + DP_LINK_STATUS_SIZE);
504         if (ret)
505                 goto out;
506
507         ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr));
508         if (ret)
509                 goto out;
510
511         addr = get_unaligned_be24(hdr + 2);
512         if (addr != DP_LANE0_1_STATUS)
513                 goto out;
514
515         ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status,
516                                           DP_LINK_STATUS_SIZE);
517
518 out:
519         mutex_unlock(&mhdp->mbox_mutex);
520
521         if (ret)
522                 dev_err(mhdp->dev, "Failed to adjust Link Training.\n");
523
524         return ret;
525 }
526
527 /**
528  * cdns_mhdp_link_power_up() - power up a DisplayPort link
529  * @aux: DisplayPort AUX channel
530  * @link: pointer to a structure containing the link configuration
531  *
532  * Returns 0 on success or a negative error code on failure.
533  */
534 static
535 int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
536 {
537         u8 value;
538         int err;
539
540         /* DP_SET_POWER register is only available on DPCD v1.1 and later */
541         if (link->revision < 0x11)
542                 return 0;
543
544         err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
545         if (err < 0)
546                 return err;
547
548         value &= ~DP_SET_POWER_MASK;
549         value |= DP_SET_POWER_D0;
550
551         err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
552         if (err < 0)
553                 return err;
554
555         /*
556          * According to the DP 1.1 specification, a "Sink Device must exit the
557          * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
558          * Control Field", register 0x600).
559          */
560         usleep_range(1000, 2000);
561
562         return 0;
563 }
564
565 /**
566  * cdns_mhdp_link_power_down() - power down a DisplayPort link
567  * @aux: DisplayPort AUX channel
568  * @link: pointer to a structure containing the link configuration
569  *
570  * Returns 0 on success or a negative error code on failure.
571  */
572 static
573 int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
574                               struct cdns_mhdp_link *link)
575 {
576         u8 value;
577         int err;
578
579         /* DP_SET_POWER register is only available on DPCD v1.1 and later */
580         if (link->revision < 0x11)
581                 return 0;
582
583         err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
584         if (err < 0)
585                 return err;
586
587         value &= ~DP_SET_POWER_MASK;
588         value |= DP_SET_POWER_D3;
589
590         err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
591         if (err < 0)
592                 return err;
593
594         return 0;
595 }
596
597 /**
598  * cdns_mhdp_link_configure() - configure a DisplayPort link
599  * @aux: DisplayPort AUX channel
600  * @link: pointer to a structure containing the link configuration
601  *
602  * Returns 0 on success or a negative error code on failure.
603  */
604 static
605 int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
606                              struct cdns_mhdp_link *link)
607 {
608         u8 values[2];
609         int err;
610
611         values[0] = drm_dp_link_rate_to_bw_code(link->rate);
612         values[1] = link->num_lanes;
613
614         if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
615                 values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
616
617         err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
618         if (err < 0)
619                 return err;
620
621         return 0;
622 }
623
624 static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
625 {
626         return min(mhdp->host.link_rate, mhdp->sink.link_rate);
627 }
628
629 static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
630 {
631         return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
632 }
633
634 static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
635 {
636         return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
637 }
638
639 static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
640 {
641         /* Check if SSC is supported by both sides */
642         return mhdp->host.ssc && mhdp->sink.ssc;
643 }
644
645 static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
646 {
647         dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);
648
649         if (mhdp->plugged)
650                 return connector_status_connected;
651         else
652                 return connector_status_disconnected;
653 }
654
655 static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
656 {
657         u32 major_num, minor_num, revision;
658         u32 fw_ver, lib_ver;
659
660         fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
661                | readl(mhdp->regs + CDNS_VER_L);
662
663         lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
664                 | readl(mhdp->regs + CDNS_LIB_L_ADDR);
665
666         if (lib_ver < 33984) {
667                 /*
668                  * Older FW versions with major number 1 stored the repository
669                  * revision number in these registers instead of a proper
670                  * version number; this is how those FW versions are identified.
671                  */
672                 major_num = 1;
673                 minor_num = 2;
674                 if (fw_ver == 26098) {
675                         revision = 15;
676                 } else if (lib_ver == 0 && fw_ver == 0) {
677                         revision = 17;
678                 } else {
679                         dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
680                                 fw_ver, lib_ver);
681                         return -ENODEV;
682                 }
683         } else {
684                 /* To identify newer FW versions with major number 2 onwards. */
685                 major_num = fw_ver / 10000;
686                 minor_num = (fw_ver / 100) % 100;
687                 revision = (fw_ver % 10000) % 100;
688         }
689
690         dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
691                 revision);
692         return 0;
693 }
694
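/*
 * Load and start the firmware: copy the image into IMEM while the uCPU is
 * stalled, release the stall, wait for the KEEP_ALIVE counter to start
 * ticking, verify the firmware version, clear stale events, switch the
 * firmware to FW_ACTIVE and, if the bridge is already attached, unmask
 * the SW event interrupt.
 */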
695 static int cdns_mhdp_fw_activate(const struct firmware *fw,
696                                  struct cdns_mhdp_device *mhdp)
697 {
698         unsigned int reg;
699         int ret;
700
701         /* Release uCPU reset and stall it. */
702         writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);
703
704         memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);
705
706         /* Leave debug mode, release stall */
707         writel(0, mhdp->regs + CDNS_APB_CTRL);
708
709         /*
710          * Wait for the KEEP_ALIVE "message" on the first 8 bits.
711          * The firmware updates it on each scheduler "tick" (~2 ms).
712          */
713         ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
714                                  reg & CDNS_KEEP_ALIVE_MASK, 500,
715                                  CDNS_KEEP_ALIVE_TIMEOUT);
716         if (ret) {
717                 dev_err(mhdp->dev,
718                         "device didn't give any life sign: reg %d\n", reg);
719                 return ret;
720         }
721
722         ret = cdns_mhdp_check_fw_version(mhdp);
723         if (ret)
724                 return ret;
725
726         /* Init events to 0 as it's not cleared by FW at boot but on read */
727         readl(mhdp->regs + CDNS_SW_EVENT0);
728         readl(mhdp->regs + CDNS_SW_EVENT1);
729         readl(mhdp->regs + CDNS_SW_EVENT2);
730         readl(mhdp->regs + CDNS_SW_EVENT3);
731
732         /* Activate uCPU */
733         ret = cdns_mhdp_set_firmware_active(mhdp, true);
734         if (ret)
735                 return ret;
736
737         spin_lock(&mhdp->start_lock);
738
739         mhdp->hw_state = MHDP_HW_READY;
740
741         /*
742          * Here we must keep the lock while enabling the interrupts
743          * since it would otherwise be possible that interrupt enable
744          * code is executed after the bridge is detached. A similar
745          * situation is not possible in the attach()/detach() callbacks,
746          * since the hw_state change from MHDP_HW_READY to
747          * MHDP_HW_STOPPED happens only on driver removal, when the
748          * bridge should already be detached.
749          */
750         if (mhdp->bridge_attached)
751                 writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
752                        mhdp->regs + CDNS_APB_INT_MASK);
753
754         spin_unlock(&mhdp->start_lock);
755
756         wake_up(&mhdp->fw_load_wq);
757         dev_dbg(mhdp->dev, "DP FW activated\n");
758
759         return 0;
760 }
761
762 static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
763 {
764         struct cdns_mhdp_device *mhdp = context;
765         bool bridge_attached;
766         int ret;
767
768         dev_dbg(mhdp->dev, "firmware callback\n");
769
770         if (!fw || !fw->data) {
771                 dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
772                 return;
773         }
774
775         ret = cdns_mhdp_fw_activate(fw, mhdp);
776
777         release_firmware(fw);
778
779         if (ret)
780                 return;
781
782         /*
783          *  XXX how to make sure the bridge is still attached when
784          *      calling drm_kms_helper_hotplug_event() after releasing
785          *      the lock? We should not hold the spin lock when
786          *      calling drm_kms_helper_hotplug_event() since it may
787          *      cause a deadlock: the fbdev console calls detect() from the
788          *      same thread, further down the call stack started here.
789          */
790         spin_lock(&mhdp->start_lock);
791         bridge_attached = mhdp->bridge_attached;
792         spin_unlock(&mhdp->start_lock);
793         if (bridge_attached) {
794                 if (mhdp->connector.dev)
795                         drm_kms_helper_hotplug_event(mhdp->bridge.dev);
796                 else
797                         drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
798         }
799 }
800
801 static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
802 {
803         int ret;
804
805         ret = reject_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
806                                       GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
807         if (ret) {
808                 dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
809                         FW_NAME, ret);
810                 return ret;
811         }
812
813         return 0;
814 }
815
816 static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
817                                   struct drm_dp_aux_msg *msg)
818 {
819         struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
820         int ret;
821
822         if (msg->request != DP_AUX_NATIVE_WRITE &&
823             msg->request != DP_AUX_NATIVE_READ)
824                 return -EOPNOTSUPP;
825
826         if (msg->request == DP_AUX_NATIVE_WRITE) {
827                 const u8 *buf = msg->buffer;
828                 unsigned int i;
829
830                 for (i = 0; i < msg->size; ++i) {
831                         ret = cdns_mhdp_dpcd_write(mhdp,
832                                                    msg->address + i, buf[i]);
833                         if (!ret)
834                                 continue;
835
836                         dev_err(mhdp->dev,
837                                 "Failed to write DPCD addr %u\n",
838                                 msg->address + i);
839
840                         return ret;
841                 }
842         } else {
843                 ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
844                                           msg->buffer, msg->size);
845                 if (ret) {
846                         dev_err(mhdp->dev,
847                                 "Failed to read DPCD addr %u\n",
848                                 msg->address);
849
850                         return ret;
851                 }
852         }
853
854         return msg->size;
855 }
856
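/*
 * Prepare the controller, PHY and sink for the clock recovery phase:
 * disable any active training pattern in the sink, reset the PHY training
 * configuration, program enhanced framing and lane enables, write the
 * link rate and lane count to the sink, configure the PHY, then enable
 * TPS1 with scrambling bypassed on both ends.
 */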
857 static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
858 {
859         union phy_configure_opts phy_cfg;
860         u32 reg32;
861         int ret;
862
863         drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
864                            DP_TRAINING_PATTERN_DISABLE);
865
866         /* Reset PHY configuration */
867         reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
868         if (!mhdp->host.scrambler)
869                 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
870
871         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
872
873         cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
874                             mhdp->sink.enhanced & mhdp->host.enhanced);
875
876         cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
877                             CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));
878
879         cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
880         phy_cfg.dp.link_rate = mhdp->link.rate / 100;
881         phy_cfg.dp.lanes = mhdp->link.num_lanes;
882
883         memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
884         memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));
885
886         phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
887         phy_cfg.dp.set_lanes = true;
888         phy_cfg.dp.set_rate = true;
889         phy_cfg.dp.set_voltages = true;
890         ret = phy_configure(mhdp->phy,  &phy_cfg);
891         if (ret) {
892                 dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
893                         __func__, ret);
894                 return ret;
895         }
896
897         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
898                             CDNS_PHY_COMMON_CONFIG |
899                             CDNS_PHY_TRAINING_EN |
900                             CDNS_PHY_TRAINING_TYPE(1) |
901                             CDNS_PHY_SCRAMBLER_BYPASS);
902
903         drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
904                            DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);
905
906         return 0;
907 }
908
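/*
 * Translate the sink's requested voltage swing / pre-emphasis adjustments
 * into the lanes_data bytes sent to the firmware and into the PHY
 * configuration, clamping each lane to the host limits and keeping the
 * combined level within the allowed range.
 */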
909 static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
910                                        u8 link_status[DP_LINK_STATUS_SIZE],
911                                        u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
912                                        union phy_configure_opts *phy_cfg)
913 {
914         u8 adjust, max_pre_emph, max_volt_swing;
915         u8 set_volt, set_pre;
916         unsigned int i;
917
918         max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
919                            << DP_TRAIN_PRE_EMPHASIS_SHIFT;
920         max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);
921
922         for (i = 0; i < mhdp->link.num_lanes; i++) {
923                 /* Check if Voltage swing and pre-emphasis are within limits */
924                 adjust = drm_dp_get_adjust_request_voltage(link_status, i);
925                 set_volt = min(adjust, max_volt_swing);
926
927                 adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
928                 set_pre = min(adjust, max_pre_emph)
929                           >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
930
931                 /*
932                  * If the combination of voltage swing and pre-emphasis levels
933                  * is not allowed (their sum exceeds 3), keep the pre-emphasis
934                  * as-is and lower the voltage swing.
935                  */
936                 if (set_volt + set_pre > 3)
937                         set_volt = 3 - set_pre;
938
939                 phy_cfg->dp.voltage[i] = set_volt;
940                 lanes_data[i] = set_volt;
941
942                 if (set_volt == max_volt_swing)
943                         lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;
944
945                 phy_cfg->dp.pre[i] = set_pre;
946                 lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);
947
948                 if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
949                         lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
950         }
951 }
952
953 static
954 void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
955                                           unsigned int lane, u8 volt)
956 {
957         unsigned int s = ((lane & 1) ?
958                           DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
959                           DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
960         unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
961
962         link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
963         link_status[idx] |= volt << s;
964 }
965
966 static
967 void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
968                                                unsigned int lane, u8 pre_emphasis)
969 {
970         unsigned int s = ((lane & 1) ?
971                           DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
972                           DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
973         unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
974
975         link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
976         link_status[idx] |= pre_emphasis << s;
977 }
978
979 static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
980                                           u8 link_status[DP_LINK_STATUS_SIZE])
981 {
982         u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
983         u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
984         unsigned int i;
985         u8 volt, pre;
986
987         for (i = 0; i < mhdp->link.num_lanes; i++) {
988                 volt = drm_dp_get_adjust_request_voltage(link_status, i);
989                 pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
990                 if (volt + pre > 3)
991                         cdns_mhdp_set_adjust_request_voltage(link_status, i,
992                                                              3 - pre);
993                 if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
994                         cdns_mhdp_set_adjust_request_voltage(link_status, i,
995                                                              max_volt);
996                 if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
997                         cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
998                                                                   i, max_pre);
999         }
1000 }
1001
1002 static void cdns_mhdp_print_lt_status(const char *prefix,
1003                                       struct cdns_mhdp_device *mhdp,
1004                                       union phy_configure_opts *phy_cfg)
1005 {
1006         char vs[8] = "0/0/0/0";
1007         char pe[8] = "0/0/0/0";
1008         unsigned int i;
1009
1010         for (i = 0; i < mhdp->link.num_lanes; i++) {
1011                 vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
1012                 pe[i * 2] = '0' + phy_cfg->dp.pre[i];
1013         }
1014
1015         vs[i * 2 - 1] = '\0';
1016         pe[i * 2 - 1] = '\0';
1017
1018         dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
1019                 prefix,
1020                 mhdp->link.num_lanes, mhdp->link.rate / 100,
1021                 vs, pe);
1022 }
1023
1024 static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
1025                                                u8 eq_tps,
1026                                                unsigned int training_interval)
1027 {
1028         u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
1029         u8 link_status[DP_LINK_STATUS_SIZE];
1030         union phy_configure_opts phy_cfg;
1031         u32 reg32;
1032         int ret;
1033         bool r;
1034
1035         dev_dbg(mhdp->dev, "Starting EQ phase\n");
1036
1037         /* Enable link training TPS[eq_tps] in PHY */
1038         reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
1039                 CDNS_PHY_TRAINING_TYPE(eq_tps);
1040         if (eq_tps != 4)
1041                 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1042         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1043
1044         drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1045                            (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
1046                            CDNS_DP_TRAINING_PATTERN_4);
1047
1048         drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
1049
1050         do {
1051                 cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
1052                                            &phy_cfg);
1053                 phy_cfg.dp.lanes = mhdp->link.num_lanes;
1054                 phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
1055                 phy_cfg.dp.set_lanes = false;
1056                 phy_cfg.dp.set_rate = false;
1057                 phy_cfg.dp.set_voltages = true;
1058                 ret = phy_configure(mhdp->phy,  &phy_cfg);
1059                 if (ret) {
1060                         dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
1061                                 __func__, ret);
1062                         goto err;
1063                 }
1064
1065                 cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
1066                                     training_interval, lanes_data, link_status);
1067
1068                 r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
1069                 if (!r)
1070                         goto err;
1071
1072                 if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
1073                         cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
1074                                                   &phy_cfg);
1075                         return true;
1076                 }
1077
1078                 fail_counter_short++;
1079
1080                 cdns_mhdp_adjust_requested_eq(mhdp, link_status);
1081         } while (fail_counter_short < 5);
1082
1083 err:
1084         cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);
1085
1086         return false;
1087 }
1088
1089 static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
1090                                           u8 link_status[DP_LINK_STATUS_SIZE],
1091                                           u8 *req_volt, u8 *req_pre)
1092 {
1093         const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1094         const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1095         unsigned int i;
1096
1097         for (i = 0; i < mhdp->link.num_lanes; i++) {
1098                 u8 val;
1099
1100                 val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
1101                       max_volt : req_volt[i];
1102                 cdns_mhdp_set_adjust_request_voltage(link_status, i, val);
1103
1104                 val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
1105                       max_pre : req_pre[i];
1106                 cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
1107         }
1108 }
1109
1110 static
1111 void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
1112                            bool *same_before_adjust, bool *max_swing_reached,
1113                            u8 before_cr[CDNS_DP_MAX_NUM_LANES],
1114                            u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
1115                            u8 *req_pre)
1116 {
1117         const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1118         const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1119         bool same_pre, same_volt;
1120         unsigned int i;
1121         u8 adjust;
1122
1123         *same_before_adjust = false;
1124         *max_swing_reached = false;
1125         *cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);
1126
1127         for (i = 0; i < mhdp->link.num_lanes; i++) {
1128                 adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
1129                 req_volt[i] = min(adjust, max_volt);
1130
1131                 adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
1132                       DP_TRAIN_PRE_EMPHASIS_SHIFT;
1133                 req_pre[i] = min(adjust, max_pre);
1134
1135                 same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
1136                            req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1137                 same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
1138                             req_volt[i];
1139                 if (same_pre && same_volt)
1140                         *same_before_adjust = true;
1141
1142                 /* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
1143                 if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
1144                         *max_swing_reached = true;
1145                         return;
1146                 }
1147         }
1148 }
1149
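/*
 * Clock recovery phase: repeatedly apply the requested lane settings, let
 * the firmware run training with a 100 us delay, and re-read the link
 * status. Give up when the maximum swing is reached, when the same
 * settings are requested five times in a row, or after ten attempts.
 */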
1150 static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
1151 {
1152         u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
1153         fail_counter_short = 0, fail_counter_cr_long = 0;
1154         u8 link_status[DP_LINK_STATUS_SIZE];
1155         bool cr_done;
1156         union phy_configure_opts phy_cfg;
1157         int ret;
1158
1159         dev_dbg(mhdp->dev, "Starting CR phase\n");
1160
1161         ret = cdns_mhdp_link_training_init(mhdp);
1162         if (ret)
1163                 goto err;
1164
1165         drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
1166
1167         do {
1168                 u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
1169                 u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
1170                 bool same_before_adjust, max_swing_reached;
1171
1172                 cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
1173                                            &phy_cfg);
1174                 phy_cfg.dp.lanes = mhdp->link.num_lanes;
1175                 phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
1176                 phy_cfg.dp.set_lanes = false;
1177                 phy_cfg.dp.set_rate = false;
1178                 phy_cfg.dp.set_voltages = true;
1179                 ret = phy_configure(mhdp->phy,  &phy_cfg);
1180                 if (ret) {
1181                         dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
1182                                 __func__, ret);
1183                         goto err;
1184                 }
1185
1186                 cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
1187                                     lanes_data, link_status);
1188
1189                 cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
1190                                       &max_swing_reached, lanes_data,
1191                                       link_status,
1192                                       requested_adjust_volt_swing,
1193                                       requested_adjust_pre_emphasis);
1194
1195                 if (max_swing_reached) {
1196                         dev_err(mhdp->dev, "CR: max swing reached\n");
1197                         goto err;
1198                 }
1199
1200                 if (cr_done) {
1201                         cdns_mhdp_print_lt_status("CR phase ok", mhdp,
1202                                                   &phy_cfg);
1203                         return true;
1204                 }
1205
1206                 /* Not all CR_DONE bits set */
1207                 fail_counter_cr_long++;
1208
1209                 if (same_before_adjust) {
1210                         fail_counter_short++;
1211                         continue;
1212                 }
1213
1214                 fail_counter_short = 0;
1215                 /*
1216                  * Voltage swing/pre-emphasis adjust requested
1217                  * during CR phase
1218                  */
1219                 cdns_mhdp_adjust_requested_cr(mhdp, link_status,
1220                                               requested_adjust_volt_swing,
1221                                               requested_adjust_pre_emphasis);
1222         } while (fail_counter_short < 5 && fail_counter_cr_long < 10);
1223
1224 err:
1225         cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);
1226
1227         return false;
1228 }
1229
1230 static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
1231 {
1232         switch (drm_dp_link_rate_to_bw_code(link->rate)) {
1233         case DP_LINK_BW_2_7:
1234                 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
1235                 break;
1236         case DP_LINK_BW_5_4:
1237                 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
1238                 break;
1239         case DP_LINK_BW_8_1:
1240                 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
1241                 break;
1242         }
1243 }
1244
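/*
 * Full link training with fallback: if clock recovery fails, first lower
 * the link rate and then the lane count; if channel equalization fails,
 * first lower the lane count and then the link rate. On success enable
 * the framer with the final lane count and reset the PHY training
 * configuration.
 */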
1245 static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
1246                                    unsigned int training_interval)
1247 {
1248         u32 reg32;
1249         const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
1250         int ret;
1251
1252         while (1) {
1253                 if (!cdns_mhdp_link_training_cr(mhdp)) {
1254                         if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
1255                             DP_LINK_BW_1_62) {
1256                                 dev_dbg(mhdp->dev,
1257                                         "Reducing link rate during CR phase\n");
1258                                 cdns_mhdp_lower_link_rate(&mhdp->link);
1259
1260                                 continue;
1261                         } else if (mhdp->link.num_lanes > 1) {
1262                                 dev_dbg(mhdp->dev,
1263                                         "Reducing lanes number during CR phase\n");
1264                                 mhdp->link.num_lanes >>= 1;
1265                                 mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1266
1267                                 continue;
1268                         }
1269
1270                         dev_err(mhdp->dev,
1271                                 "Link training failed during CR phase\n");
1272                         goto err;
1273                 }
1274
1275                 if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
1276                                                        training_interval))
1277                         break;
1278
1279                 if (mhdp->link.num_lanes > 1) {
1280                         dev_dbg(mhdp->dev,
1281                                 "Reducing lanes number during EQ phase\n");
1282                         mhdp->link.num_lanes >>= 1;
1283
1284                         continue;
1285                 } else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
1286                            DP_LINK_BW_1_62) {
1287                         dev_dbg(mhdp->dev,
1288                                 "Reducing link rate during EQ phase\n");
1289                         cdns_mhdp_lower_link_rate(&mhdp->link);
1290                         mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1291
1292                         continue;
1293                 }
1294
1295                 dev_err(mhdp->dev, "Link training failed during EQ phase\n");
1296                 goto err;
1297         }
1298
1299         dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
1300                 mhdp->link.num_lanes, mhdp->link.rate / 100);
1301
1302         drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1303                            mhdp->host.scrambler ? 0 :
1304                            DP_LINK_SCRAMBLING_DISABLE);
1305
1306         ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
1307         if (ret < 0) {
1308                 dev_err(mhdp->dev,
1309                         "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1310                         ret);
1311                 return ret;
1312         }
1313         reg32 &= ~GENMASK(1, 0);
1314         reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
1315         reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
1316         reg32 |= CDNS_DP_FRAMER_EN;
1317         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);
1318
1319         /* Reset PHY config */
1320         reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
1321         if (!mhdp->host.scrambler)
1322                 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1323         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1324
1325         return 0;
1326 err:
1327         /* Reset PHY config */
1328         reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
1329         if (!mhdp->host.scrambler)
1330                 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1331         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1332
1333         drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1334                            DP_TRAINING_PATTERN_DISABLE);
1335
1336         return -EIO;
1337 }
1338
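/*
 * Convert the DP_TRAINING_AUX_RD_INTERVAL field into microseconds:
 * 0 means 400 us, values 1-4 mean 4 ms << (value - 1), i.e. 4/8/16/32 ms.
 * Anything else is invalid and makes the caller abort link training.
 */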
1339 static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
1340                                               u32 interval)
1341 {
1342         if (interval == 0)
1343                 return 400;
1344         if (interval < 5)
1345                 return 4000 << (interval - 1);
1346         dev_err(mhdp->dev,
1347                 "wrong training interval returned by DPCD: %d\n", interval);
1348         return 0;
1349 }
1350
1351 static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
1352 {
1353         unsigned int link_rate;
1354
1355         /* Get source capabilities based on PHY attributes */
1356
1357         mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width;
1358         if (!mhdp->host.lanes_cnt)
1359                 mhdp->host.lanes_cnt = 4;
1360
1361         link_rate = mhdp->phy->attrs.max_link_rate;
1362         if (!link_rate)
1363                 link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
1364         else
1365                 /* PHY uses Mb/s, DRM uses tens of kb/s. */
1366                 link_rate *= 100;
1367
1368         mhdp->host.link_rate = link_rate;
1369         mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
1370         mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
1371         mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
1372                                   CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
1373                                   CDNS_SUPPORT_TPS(4);
1374         mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
1375         mhdp->host.fast_link = false;
1376         mhdp->host.enhanced = true;
1377         mhdp->host.scrambler = true;
1378         mhdp->host.ssc = false;
1379 }
1380
1381 static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
1382                                      u8 dpcd[DP_RECEIVER_CAP_SIZE])
1383 {
1384         mhdp->sink.link_rate = mhdp->link.rate;
1385         mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
1386         mhdp->sink.enhanced = !!(mhdp->link.capabilities &
1387                                  DP_LINK_CAP_ENHANCED_FRAMING);
1388
1389         /* Set SSC support */
1390         mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
1391                                   DP_MAX_DOWNSPREAD_0_5);
1392
1393         /* Set TPS support */
1394         mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
1395         if (drm_dp_tps3_supported(dpcd))
1396                 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
1397         if (drm_dp_tps4_supported(dpcd))
1398                 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
1399
1400         /* Set fast link support */
1401         mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
1402                                   DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
1403 }
1404
1405 static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
1406 {
1407         u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2];
1408         u32 resp, interval, interval_us;
1409         u8 ext_cap_chk = 0;
1410         unsigned int addr;
1411         int err;
1412
1413         WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1414
1415         drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL,
1416                           &ext_cap_chk);
1417
1418         if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
1419                 addr = DP_DP13_DPCD_REV;
1420         else
1421                 addr = DP_DPCD_REV;
1422
1423         err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE);
1424         if (err < 0) {
1425                 dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
1426                 return err;
1427         }
1428
1429         mhdp->link.revision = dpcd[0];
1430         mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]);
1431         mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
1432
1433         if (dpcd[2] & DP_ENHANCED_FRAME_CAP)
1434                 mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
1435
1436         dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
1437         cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
1438
1439         cdns_mhdp_fill_sink_caps(mhdp, dpcd);
1440
1441         mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1442         mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1443
1444         /* Disable framer for link training */
1445         err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
1446         if (err < 0) {
1447                 dev_err(mhdp->dev,
1448                         "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1449                         err);
1450                 return err;
1451         }
1452
1453         resp &= ~CDNS_DP_FRAMER_EN;
1454         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
1455
1456         /* Spread AMP if required, enable 8b/10b coding */
1457         amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
1458         amp[1] = DP_SET_ANSI_8B10B;
1459         drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);
1460
1461         if (mhdp->host.fast_link & mhdp->sink.fast_link) {
1462                 dev_err(mhdp->dev, "fastlink not supported\n");
1463                 return -EOPNOTSUPP;
1464         }
1465
1466         interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK;
1467         interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval);
1468         if (!interval_us ||
1469             cdns_mhdp_link_training(mhdp, interval_us)) {
1470                 dev_err(mhdp->dev, "Link training failed. Exiting.\n");
1471                 return -EIO;
1472         }
1473
1474         mhdp->link_up = true;
1475
1476         return 0;
1477 }
1478
1479 static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
1480 {
1481         WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1482
1483         if (mhdp->plugged)
1484                 cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
1485
1486         mhdp->link_up = false;
1487 }
1488
1489 static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp,
1490                                        struct drm_connector *connector)
1491 {
1492         if (!mhdp->plugged)
1493                 return NULL;
1494
1495         return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
1496 }
1497
1498 static int cdns_mhdp_get_modes(struct drm_connector *connector)
1499 {
1500         struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
1501         struct edid *edid;
1502         int num_modes;
1503
1504         if (!mhdp->plugged)
1505                 return 0;
1506
1507         edid = cdns_mhdp_get_edid(mhdp, connector);
1508         if (!edid) {
1509                 dev_err(mhdp->dev, "Failed to read EDID\n");
1510                 return 0;
1511         }
1512
1513         drm_connector_update_edid_property(connector, edid);
1514         num_modes = drm_add_edid_modes(connector, edid);
1515         kfree(edid);
1516
1517         /*
1518          * HACK: Warn about unsupported display formats until we deal
1519          *       with them correctly.
1520          */
1521         if (connector->display_info.color_formats &&
1522             !(connector->display_info.color_formats &
1523               mhdp->display_fmt.color_format))
1524                 dev_warn(mhdp->dev,
1525                          "%s: No supported color_format found (0x%08x)\n",
1526                          __func__, connector->display_info.color_formats);
1527
1528         if (connector->display_info.bpc &&
1529             connector->display_info.bpc < mhdp->display_fmt.bpc)
1530                 dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
1531                          __func__, connector->display_info.bpc,
1532                          mhdp->display_fmt.bpc);
1533
1534         return num_modes;
1535 }
1536
1537 static int cdns_mhdp_connector_detect(struct drm_connector *conn,
1538                                       struct drm_modeset_acquire_ctx *ctx,
1539                                       bool force)
1540 {
1541         struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1542
1543         return cdns_mhdp_detect(mhdp);
1544 }
1545
1546 static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
1547 {
1548         u32 bpp;
1549
1550         if (fmt->y_only)
1551                 return fmt->bpc;
1552
1553         switch (fmt->color_format) {
1554         case DRM_COLOR_FORMAT_RGB444:
1555         case DRM_COLOR_FORMAT_YCRCB444:
1556                 bpp = fmt->bpc * 3;
1557                 break;
1558         case DRM_COLOR_FORMAT_YCRCB422:
1559                 bpp = fmt->bpc * 2;
1560                 break;
1561         case DRM_COLOR_FORMAT_YCRCB420:
1562                 bpp = fmt->bpc * 3 / 2;
1563                 break;
1564         default:
1565                 bpp = fmt->bpc * 3;
1566                 WARN_ON(1);
1567         }
1568         return bpp;
1569 }
1570
1571 static
1572 bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
1573                             const struct drm_display_mode *mode,
1574                             unsigned int lanes, unsigned int rate)
1575 {
1576         u32 max_bw, req_bw, bpp;
1577
1578         /*
1579          * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8
1580          * gives the payload bandwidth in kB/s. As DisplayPort uses an 8b/10b
1581          * encoding, that value also equals the link bandwidth in 10 kb/s units,
1582          * which matches the units of the rate parameter.
1583          */
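             /*
              * For example, 1920x1080@60 (mode->clock = 148500 kHz) at 24 bpp
              * needs 148500 * 24 / 8 = 445500, which fits within 4 lanes at
              * HBR (rate = 270000): 4 * 270000 = 1080000.
              */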
1584
1585         bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1586         req_bw = mode->clock * bpp / 8;
1587         max_bw = lanes * rate;
1588         if (req_bw > max_bw) {
1589                 dev_dbg(mhdp->dev,
1590                         "Unsupported Mode: %s, Req BW: %u, Available Max BW: %u\n",
1591                         mode->name, req_bw, max_bw);
1592
1593                 return false;
1594         }
1595
1596         return true;
1597 }
1598
1599 static
1600 enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
1601                                           struct drm_display_mode *mode)
1602 {
1603         struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1604
1605         mutex_lock(&mhdp->link_mutex);
1606
1607         if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
1608                                     mhdp->link.rate)) {
1609                 mutex_unlock(&mhdp->link_mutex);
1610                 return MODE_CLOCK_HIGH;
1611         }
1612
1613         mutex_unlock(&mhdp->link_mutex);
1614         return MODE_OK;
1615 }
1616
1617 static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
1618         .detect_ctx = cdns_mhdp_connector_detect,
1619         .get_modes = cdns_mhdp_get_modes,
1620         .mode_valid = cdns_mhdp_mode_valid,
1621 };
1622
1623 static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
1624         .fill_modes = drm_helper_probe_single_connector_modes,
1625         .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1626         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1627         .reset = drm_atomic_helper_connector_reset,
1628         .destroy = drm_connector_cleanup,
1629 };
1630
1631 static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
1632 {
1633         u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
1634         struct drm_connector *conn = &mhdp->connector;
1635         struct drm_bridge *bridge = &mhdp->bridge;
1636         int ret;
1637
1638         if (!bridge->encoder) {
1639                 dev_err(mhdp->dev, "Parent encoder object not found\n");
1640                 return -ENODEV;
1641         }
1642
1643         conn->polled = DRM_CONNECTOR_POLL_HPD;
1644
1645         ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
1646                                  DRM_MODE_CONNECTOR_DisplayPort);
1647         if (ret) {
1648                 dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
1649                 return ret;
1650         }
1651
1652         drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);
1653
1654         ret = drm_display_info_set_bus_formats(&conn->display_info,
1655                                                &bus_format, 1);
1656         if (ret)
1657                 return ret;
1658
1659         ret = drm_connector_attach_encoder(conn, bridge->encoder);
1660         if (ret) {
1661                 dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
1662                 return ret;
1663         }
1664
1665         return 0;
1666 }
1667
1668 static int cdns_mhdp_attach(struct drm_bridge *bridge,
1669                             enum drm_bridge_attach_flags flags)
1670 {
1671         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
1672         bool hw_ready;
1673         int ret;
1674
1675         dev_dbg(mhdp->dev, "%s\n", __func__);
1676
1677         if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
1678                 ret = cdns_mhdp_connector_init(mhdp);
1679                 if (ret)
1680                         return ret;
1681         }
1682
1683         spin_lock(&mhdp->start_lock);
1684
1685         mhdp->bridge_attached = true;
1686         hw_ready = mhdp->hw_state == MHDP_HW_READY;
1687
1688         spin_unlock(&mhdp->start_lock);
1689
1690         /* Enable SW event interrupts */
1691         if (hw_ready)
1692                 writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
1693                        mhdp->regs + CDNS_APB_INT_MASK);
1694
1695         return 0;
1696 }
1697
1698 static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
1699                                       const struct drm_display_mode *mode)
1700 {
1701         unsigned int dp_framer_sp = 0, msa_horizontal_1,
1702                 msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
1703                 misc0 = 0, misc1 = 0, pxl_repr,
1704                 front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
1705                 dp_vertical_1;
1706         u8 stream_id = mhdp->stream_id;
1707         u32 bpp, bpc, pxlfmt, framer;
1708         int ret;
1709
1710         pxlfmt = mhdp->display_fmt.color_format;
1711         bpc = mhdp->display_fmt.bpc;
1712
1713         /*
1714          * If the stream is YCbCr and not SD, use ITU-R BT.709 coefficients.
1715          * The ITU variant for YCbCr 4:2:0 still needs handling once supported.
1716          */
1717         if ((pxlfmt == DRM_COLOR_FORMAT_YCRCB444 ||
1718              pxlfmt == DRM_COLOR_FORMAT_YCRCB422) && mode->crtc_vdisplay >= 720)
1719                 misc0 = DP_YCBCR_COEFFICIENTS_ITU709;
1720
1721         bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1722
1723         switch (pxlfmt) {
1724         case DRM_COLOR_FORMAT_RGB444:
1725                 pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
1726                 misc0 |= DP_COLOR_FORMAT_RGB;
1727                 break;
1728         case DRM_COLOR_FORMAT_YCRCB444:
1729                 pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
1730                 misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
1731                 break;
1732         case DRM_COLOR_FORMAT_YCRCB422:
1733                 pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
1734                 misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
1735                 break;
1736         case DRM_COLOR_FORMAT_YCRCB420:
1737                 pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
1738                 break;
1739         default:
1740                 pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
1741         }
1742
1743         switch (bpc) {
1744         case 6:
1745                 misc0 |= DP_TEST_BIT_DEPTH_6;
1746                 pxl_repr |= CDNS_DP_FRAMER_6_BPC;
1747                 break;
1748         case 8:
1749                 misc0 |= DP_TEST_BIT_DEPTH_8;
1750                 pxl_repr |= CDNS_DP_FRAMER_8_BPC;
1751                 break;
1752         case 10:
1753                 misc0 |= DP_TEST_BIT_DEPTH_10;
1754                 pxl_repr |= CDNS_DP_FRAMER_10_BPC;
1755                 break;
1756         case 12:
1757                 misc0 |= DP_TEST_BIT_DEPTH_12;
1758                 pxl_repr |= CDNS_DP_FRAMER_12_BPC;
1759                 break;
1760         case 16:
1761                 misc0 |= DP_TEST_BIT_DEPTH_16;
1762                 pxl_repr |= CDNS_DP_FRAMER_16_BPC;
1763                 break;
1764         }
1765
1766         bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
1767         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1768                 bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;
1769
1770         cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
1771                             bnd_hsync2vsync);
1772
1773         hsync2vsync_pol_ctrl = 0;
1774         if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1775                 hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
1776         if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1777                 hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
1778         cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
1779                             hsync2vsync_pol_ctrl);
1780
1781         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);
1782
1783         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1784                 dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
1785         if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1786                 dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
1787         if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1788                 dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
1789         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);
1790
1791         front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
1792         back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
1793         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
1794                             CDNS_DP_FRONT_PORCH(front_porch) |
1795                             CDNS_DP_BACK_PORCH(back_porch));
1796
1797         cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
1798                             mode->crtc_hdisplay * bpp / 8);
1799
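             /*
              * MSA HStart: distance from the HSYNC leading edge to the start
              * of active video, i.e. hsync width plus horizontal back porch.
              */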
1800         msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
1801         cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
1802                             CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
1803                             CDNS_DP_MSAH0_HSYNC_START(msa_h0));
1804
1805         hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
1806         msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
1807                            CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
1808         if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1809                 msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
1810         cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
1811                             msa_horizontal_1);
1812
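             /* MSA VStart: vsync width plus vertical back porch */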
1813         msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
1814         cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
1815                             CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
1816                             CDNS_DP_MSAV0_VSYNC_START(msa_v0));
1817
1818         vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
1819         msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
1820                          CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
1821         if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1822                 msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
1823         cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
1824                             msa_vertical_1);
1825
1826         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
1827             mode->crtc_vtotal % 2 == 0)
1828                 misc1 = DP_TEST_INTERLACED;
1829         if (mhdp->display_fmt.y_only)
1830                 misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
1831         /* Use VSC SDP for Y420 */
1832         if (pxlfmt == DRM_COLOR_FORMAT_YCRCB420)
1833                 misc1 = CDNS_DP_TEST_VSC_SDP;
1834
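             /* MSA MISC0 goes in the low byte, MISC1 in the high byte */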
1835         cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
1836                             misc0 | (misc1 << 8));
1837
1838         cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
1839                             CDNS_DP_H_HSYNC_WIDTH(hsync) |
1840                             CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));
1841
1842         cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
1843                             CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
1844                             CDNS_DP_V0_VSTART(msa_v0));
1845
1846         dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
1847         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
1848             mode->crtc_vtotal % 2 == 0)
1849                 dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;
1850
1851         cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);
1852
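             /* Update only the interlace flag bit of the VB_ID register */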
1853         cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
1854                                 (mode->flags & DRM_MODE_FLAG_INTERLACE) ?
1855                                 CDNS_DP_VB_ID_INTERLACED : 0);
1856
1857         ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
1858         if (ret < 0) {
1859                 dev_err(mhdp->dev,
1860                         "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1861                         ret);
1862                 return;
1863         }
1864         framer |= CDNS_DP_FRAMER_EN;
1865         framer &= ~CDNS_DP_NO_VIDEO_MODE;
1866         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
1867 }
1868
1869 static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
1870                                  const struct drm_display_mode *mode)
1871 {
1872         u32 rate, vs, required_bandwidth, available_bandwidth;
1873         s32 line_thresh1, line_thresh2, line_thresh = 0;
1874         int pxlclock = mode->crtc_clock;
1875         u32 tu_size = 64;
1876         u32 bpp;
1877
1878         /* Get rate in MSymbols per second per lane */
1879         rate = mhdp->link.rate / 1000;
1880
1881         bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1882
1883         required_bandwidth = pxlclock * bpp / 8;
1884         available_bandwidth = mhdp->link.num_lanes * rate;
1885
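             /*
              * vs is the number of valid payload symbols in a 64-symbol
              * transfer unit: the required/available bandwidth ratio scaled
              * by the TU size. The extra division by 1000 accounts for
              * required_bandwidth being in k-symbols/s while
              * available_bandwidth is in M-symbols/s.
              */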
1886         vs = tu_size * required_bandwidth / available_bandwidth;
1887         vs /= 1000;
1888
1889         if (vs == tu_size)
1890                 vs = tu_size - 1;
1891
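             /*
              * The line threshold is derived with five fractional bits
              * (the << 5), converted back to an integer (the >> 5) and
              * padded with a margin of two.
              */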
1892         line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
1893         line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
1894         line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
1895         line_thresh = (line_thresh >> 5) + 2;
1896
1897         mhdp->stream_id = 0;
1898
1899         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
1900                             CDNS_DP_FRAMER_TU_VS(vs) |
1901                             CDNS_DP_FRAMER_TU_SIZE(tu_size) |
1902                             CDNS_DP_FRAMER_TU_CNT_RST_EN);
1903
1904         cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
1905                             line_thresh & GENMASK(5, 0));
1906
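             /* Only program TU_VS_DIFF when at most 3 TU symbols are unused */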
1907         cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
1908                             CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
1909                                                    0 : tu_size - vs));
1910
1911         cdns_mhdp_configure_video(mhdp, mode);
1912 }
1913
1914 static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
1915                                     struct drm_bridge_state *bridge_state)
1916 {
1917         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
1918         struct drm_atomic_state *state = bridge_state->base.state;
1919         struct cdns_mhdp_bridge_state *mhdp_state;
1920         struct drm_crtc_state *crtc_state;
1921         struct drm_connector *connector;
1922         struct drm_connector_state *conn_state;
1923         struct drm_bridge_state *new_state;
1924         const struct drm_display_mode *mode;
1925         u32 resp;
1926         int ret;
1927
1928         dev_dbg(mhdp->dev, "bridge enable\n");
1929
1930         mutex_lock(&mhdp->link_mutex);
1931
1932         if (mhdp->plugged && !mhdp->link_up) {
1933                 ret = cdns_mhdp_link_up(mhdp);
1934                 if (ret < 0)
1935                         goto out;
1936         }
1937
1938         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
1939                 mhdp->info->ops->enable(mhdp);
1940
1941         /* Enable VIF clock for stream 0 */
1942         ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
1943         if (ret < 0) {
1944                 dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
1945                 goto out;
1946         }
1947
1948         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
1949                             resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);
1950
1951         connector = drm_atomic_get_new_connector_for_encoder(state,
1952                                                              bridge->encoder);
1953         if (WARN_ON(!connector))
1954                 goto out;
1955
1956         conn_state = drm_atomic_get_new_connector_state(state, connector);
1957         if (WARN_ON(!conn_state))
1958                 goto out;
1959
1960         crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
1961         if (WARN_ON(!crtc_state))
1962                 goto out;
1963
1964         mode = &crtc_state->adjusted_mode;
1965
1966         new_state = drm_atomic_get_new_bridge_state(state, bridge);
1967         if (WARN_ON(!new_state))
1968                 goto out;
1969
1970         if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
1971                                     mhdp->link.rate)) {
1972                 ret = -EINVAL;
1973                 goto out;
1974         }
1975
1976         cdns_mhdp_sst_enable(mhdp, mode);
1977
1978         mhdp_state = to_cdns_mhdp_bridge_state(new_state);
1979
1980         mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
1981         drm_mode_set_name(mhdp_state->current_mode);
1982
1983         dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
1984
1985         mhdp->bridge_enabled = true;
1986
1987 out:
1988         mutex_unlock(&mhdp->link_mutex);
1989         if (ret < 0)
1990                 schedule_work(&mhdp->modeset_retry_work);
1991 }
1992
1993 static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
1994                                      struct drm_bridge_state *bridge_state)
1995 {
1996         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
1997         u32 resp;
1998
1999         dev_dbg(mhdp->dev, "%s\n", __func__);
2000
2001         mutex_lock(&mhdp->link_mutex);
2002
2003         mhdp->bridge_enabled = false;
2004         cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
2005         resp &= ~CDNS_DP_FRAMER_EN;
2006         resp |= CDNS_DP_NO_VIDEO_MODE;
2007         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
2008
2009         cdns_mhdp_link_down(mhdp);
2010
2011         /* Disable VIF clock for stream 0 */
2012         cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
2013         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
2014                             resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));
2015
2016         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
2017                 mhdp->info->ops->disable(mhdp);
2018
2019         mutex_unlock(&mhdp->link_mutex);
2020 }
2021
2022 static void cdns_mhdp_detach(struct drm_bridge *bridge)
2023 {
2024         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2025
2026         dev_dbg(mhdp->dev, "%s\n", __func__);
2027
2028         spin_lock(&mhdp->start_lock);
2029
2030         mhdp->bridge_attached = false;
2031
2032         spin_unlock(&mhdp->start_lock);
2033
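             /* Mask all APB interrupts, nobody is going to handle them anymore */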
2034         writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
2035 }
2036
2037 static struct drm_bridge_state *
2038 cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
2039 {
2040         struct cdns_mhdp_bridge_state *state;
2041
2042         state = kzalloc(sizeof(*state), GFP_KERNEL);
2043         if (!state)
2044                 return NULL;
2045
2046         __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
2047
2048         return &state->base;
2049 }
2050
2051 static void
2052 cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
2053                                       struct drm_bridge_state *state)
2054 {
2055         struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2056
2057         cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);
2058
2059         if (cdns_mhdp_state->current_mode) {
2060                 drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
2061                 cdns_mhdp_state->current_mode = NULL;
2062         }
2063
2064         kfree(cdns_mhdp_state);
2065 }
2066
2067 static struct drm_bridge_state *
2068 cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
2069 {
2070         struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2071
2072         cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
2073         if (!cdns_mhdp_state)
2074                 return NULL;
2075
2076         __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
2077
2078         return &cdns_mhdp_state->base;
2079 }
2080
2081 static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
2082                                   struct drm_bridge_state *bridge_state,
2083                                   struct drm_crtc_state *crtc_state,
2084                                   struct drm_connector_state *conn_state)
2085 {
2086         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2087         const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
2088
2089         mutex_lock(&mhdp->link_mutex);
2090
2091         if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2092                                     mhdp->link.rate)) {
2093                 dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
2094                         __func__, mode->name, mhdp->link.num_lanes,
2095                         mhdp->link.rate / 100);
2096                 mutex_unlock(&mhdp->link_mutex);
2097                 return -EINVAL;
2098         }
2099
2100         mutex_unlock(&mhdp->link_mutex);
2101         return 0;
2102 }
2103
2104 static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
2105 {
2106         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2107
2108         return cdns_mhdp_detect(mhdp);
2109 }
2110
2111 static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
2112                                               struct drm_connector *connector)
2113 {
2114         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2115
2116         return cdns_mhdp_get_edid(mhdp, connector);
2117 }
2118
2119 static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
2120 {
2121         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2122
2123         /* Enable SW event interrupts */
2124         if (mhdp->bridge_attached)
2125                 writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
2126                        mhdp->regs + CDNS_APB_INT_MASK);
2127 }
2128
2129 static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
2130 {
2131         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2132
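             /* Mask the SW event interrupt used for HPD notifications */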
2133         writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
2134 }
2135
2136 static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
2137         .atomic_enable = cdns_mhdp_atomic_enable,
2138         .atomic_disable = cdns_mhdp_atomic_disable,
2139         .atomic_check = cdns_mhdp_atomic_check,
2140         .attach = cdns_mhdp_attach,
2141         .detach = cdns_mhdp_detach,
2142         .atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
2143         .atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
2144         .atomic_reset = cdns_mhdp_bridge_atomic_reset,
2145         .detect = cdns_mhdp_bridge_detect,
2146         .get_edid = cdns_mhdp_bridge_get_edid,
2147         .hpd_enable = cdns_mhdp_bridge_hpd_enable,
2148         .hpd_disable = cdns_mhdp_bridge_hpd_disable,
2149 };
2150
2151 static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
2152 {
2153         int hpd_event, hpd_status;
2154
2155         *hpd_pulse = false;
2156
2157         hpd_event = cdns_mhdp_read_hpd_event(mhdp);
2158
2159         /* Getting event bits failed, bail out */
2160         if (hpd_event < 0) {
2161                 dev_warn(mhdp->dev, "%s: read event failed: %d\n",
2162                          __func__, hpd_event);
2163                 return false;
2164         }
2165
2166         hpd_status = cdns_mhdp_get_hpd_status(mhdp);
2167         if (hpd_status < 0) {
2168                 dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
2169                          __func__, hpd_status);
2170                 return false;
2171         }
2172
2173         if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
2174                 *hpd_pulse = true;
2175
2176         return !!hpd_status;
2177 }
2178
2179 static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
2180 {
2181         struct cdns_mhdp_bridge_state *cdns_bridge_state;
2182         struct drm_display_mode *current_mode;
2183         bool old_plugged = mhdp->plugged;
2184         struct drm_bridge_state *state;
2185         u8 status[DP_LINK_STATUS_SIZE];
2186         bool hpd_pulse;
2187         int ret = 0;
2188
2189         mutex_lock(&mhdp->link_mutex);
2190
2191         mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);
2192
2193         if (!mhdp->plugged) {
2194                 cdns_mhdp_link_down(mhdp);
2195                 mhdp->link.rate = mhdp->host.link_rate;
2196                 mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2197                 goto out;
2198         }
2199
2200         /*
2201          * If we get a HPD pulse event and we were and still are connected,
2202          * check the link status. If link status is ok, there's nothing to do
2203          * as we don't handle DP interrupts. If link status is bad, continue
2204          * with full link setup.
2205          */
2206         if (hpd_pulse && old_plugged == mhdp->plugged) {
2207                 ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);
2208
2209                 /*
2210                  * If everything looks fine, just return, as we don't handle
2211                  * DP IRQs.
2212                  */
2213                 if (ret > 0 &&
2214                     drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
2215                     drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
2216                         goto out;
2217
2218                 /* If the link is bad, mark it down so we redo link training */
2219                 mhdp->link_up = false;
2220         }
2221
2222         if (!mhdp->link_up) {
2223                 ret = cdns_mhdp_link_up(mhdp);
2224                 if (ret < 0)
2225                         goto out;
2226         }
2227
2228         if (mhdp->bridge_enabled) {
2229                 state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
2230                 if (!state) {
2231                         ret = -EINVAL;
2232                         goto out;
2233                 }
2234
2235                 cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
2236                 if (!cdns_bridge_state) {
2237                         ret = -EINVAL;
2238                         goto out;
2239                 }
2240
2241                 current_mode = cdns_bridge_state->current_mode;
2242                 if (!current_mode) {
2243                         ret = -EINVAL;
2244                         goto out;
2245                 }
2246
2247                 if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes,
2248                                             mhdp->link.rate)) {
2249                         ret = -EINVAL;
2250                         goto out;
2251                 }
2252
2253                 dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
2254                         current_mode->name);
2255
2256                 cdns_mhdp_sst_enable(mhdp, current_mode);
2257         }
2258 out:
2259         mutex_unlock(&mhdp->link_mutex);
2260         return ret;
2261 }
2262
2263 static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
2264 {
2265         struct cdns_mhdp_device *mhdp;
2266         struct drm_connector *conn;
2267
2268         mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);
2269
2270         conn = &mhdp->connector;
2271
2272         /* Grab the lock before changing the connector property */
2273         mutex_lock(&conn->dev->mode_config.mutex);
2274
2275         /*
2276          * Set connector link status to BAD and send a Uevent to notify
2277          * userspace to do a modeset.
2278          */
2279         drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
2280         mutex_unlock(&conn->dev->mode_config.mutex);
2281
2282         /* Send Hotplug uevent so userspace can reprobe */
2283         drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2284 }
2285
2286 static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
2287 {
2288         struct cdns_mhdp_device *mhdp = data;
2289         u32 apb_stat, sw_ev0;
2290         bool bridge_attached;
2291         int ret;
2292
2293         apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
2294         if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
2295                 return IRQ_NONE;
2296
2297         sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
2298
2299         /*
2300          * Calling drm_kms_helper_hotplug_event() when not attached to a drm
2301          * device causes an oops because drm_bridge->dev is NULL. See the
2302          * cdns_mhdp_fw_cb() comments for details about the problems related
2303          * to the drm_kms_helper_hotplug_event() call.
2304          */
2305         spin_lock(&mhdp->start_lock);
2306         bridge_attached = mhdp->bridge_attached;
2307         spin_unlock(&mhdp->start_lock);
2308
2309         if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
2310                 ret = cdns_mhdp_update_link_status(mhdp);
2311                 if (mhdp->connector.dev) {
2312                         if (ret < 0)
2313                                 schedule_work(&mhdp->modeset_retry_work);
2314                         else
2315                                 drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2316                 } else {
2317                         drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
2318                 }
2319         }
2320
2321         return IRQ_HANDLED;
2322 }
2323
2324 static int cdns_mhdp_probe(struct platform_device *pdev)
2325 {
2326         struct device *dev = &pdev->dev;
2327         struct cdns_mhdp_device *mhdp;
2328         unsigned long rate;
2329         struct clk *clk;
2330         int ret;
2331         int irq;
2332
2333         mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
2334         if (!mhdp)
2335                 return -ENOMEM;
2336
2337         clk = devm_clk_get(dev, NULL);
2338         if (IS_ERR(clk)) {
2339                 dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
2340                 return PTR_ERR(clk);
2341         }
2342
2343         mhdp->clk = clk;
2344         mhdp->dev = dev;
2345         mutex_init(&mhdp->mbox_mutex);
2346         mutex_init(&mhdp->link_mutex);
2347         spin_lock_init(&mhdp->start_lock);
2348
2349         drm_dp_aux_init(&mhdp->aux);
2350         mhdp->aux.dev = dev;
2351         mhdp->aux.transfer = cdns_mhdp_transfer;
2352
2353         mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
2354         if (IS_ERR(mhdp->regs)) {
2355                 dev_err(dev, "Failed to get memory resource\n");
2356                 return PTR_ERR(mhdp->regs);
2357         }
2358
2359         mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
2360         if (IS_ERR(mhdp->phy)) {
2361                 dev_err(dev, "no PHY configured\n");
2362                 return PTR_ERR(mhdp->phy);
2363         }
2364
2365         platform_set_drvdata(pdev, mhdp);
2366
2367         mhdp->info = of_device_get_match_data(dev);
2368
2369         clk_prepare_enable(clk);
2370
2371         pm_runtime_enable(dev);
2372         ret = pm_runtime_resume_and_get(dev);
2373         if (ret < 0) {
2374                 dev_err(dev, "pm_runtime_resume_and_get failed\n");
2375                 pm_runtime_disable(dev);
2376                 goto clk_disable;
2377         }
2378
2379         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
2380                 ret = mhdp->info->ops->init(mhdp);
2381                 if (ret != 0) {
2382                         dev_err(dev, "MHDP platform initialization failed: %d\n",
2383                                 ret);
2384                         goto runtime_put;
2385                 }
2386         }
2387
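             /*
              * Program the functional clock rate: whole MHz go into
              * CDNS_SW_CLK_H, the remaining Hz into CDNS_SW_CLK_L.
              */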
2388         rate = clk_get_rate(clk);
2389         writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
2390         writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
2391
2392         dev_dbg(dev, "func clk rate %lu Hz\n", rate);
2393
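             /* Mask all APB interrupts until the bridge is attached */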
2394         writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
2395
2396         irq = platform_get_irq(pdev, 0);
2397         ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
2398                                         cdns_mhdp_irq_handler, IRQF_ONESHOT,
2399                                         "mhdp8546", mhdp);
2400         if (ret) {
2401                 dev_err(dev, "cannot install IRQ %d\n", irq);
2402                 ret = -EIO;
2403                 goto plat_fini;
2404         }
2405
2406         cdns_mhdp_fill_host_caps(mhdp);
2407
2408         /* Initialize link rate and num of lanes to host values */
2409         mhdp->link.rate = mhdp->host.link_rate;
2410         mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2411
2412         /* The only currently supported format */
2413         mhdp->display_fmt.y_only = false;
2414         mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
2415         mhdp->display_fmt.bpc = 8;
2416
2417         mhdp->bridge.of_node = pdev->dev.of_node;
2418         mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
2419         mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
2420                            DRM_BRIDGE_OP_HPD;
2421         mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
2422         if (mhdp->info)
2423                 mhdp->bridge.timings = mhdp->info->timings;
2424
2425         ret = phy_init(mhdp->phy);
2426         if (ret) {
2427                 dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
2428                 goto plat_fini;
2429         }
2430
2431         /* Initialize the modeset retry work used on link training failure */
2432         INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
2433
2434         init_waitqueue_head(&mhdp->fw_load_wq);
2435
2436         ret = cdns_mhdp_load_firmware(mhdp);
2437         if (ret)
2438                 goto phy_exit;
2439
2440         drm_bridge_add(&mhdp->bridge);
2441
2442         return 0;
2443
2444 phy_exit:
2445         phy_exit(mhdp->phy);
2446 plat_fini:
2447         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2448                 mhdp->info->ops->exit(mhdp);
2449 runtime_put:
2450         pm_runtime_put_sync(dev);
2451         pm_runtime_disable(dev);
2452 clk_disable:
2453         clk_disable_unprepare(mhdp->clk);
2454
2455         return ret;
2456 }
2457
2458 static int cdns_mhdp_remove(struct platform_device *pdev)
2459 {
2460         struct cdns_mhdp_device *mhdp = dev_get_drvdata(&pdev->dev);
2461         unsigned long timeout = msecs_to_jiffies(100);
2462         bool stop_fw = false;
2463         int ret;
2464
2465         drm_bridge_remove(&mhdp->bridge);
2466
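             /*
              * Wait for the asynchronous firmware load to complete so the
              * firmware can be stopped cleanly below.
              */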
2467         ret = wait_event_timeout(mhdp->fw_load_wq,
2468                                  mhdp->hw_state == MHDP_HW_READY,
2469                                  timeout);
2470         if (ret == 0)
2471                 dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
2472                         __func__);
2473         else
2474                 stop_fw = true;
2475
2476         spin_lock(&mhdp->start_lock);
2477         mhdp->hw_state = MHDP_HW_STOPPED;
2478         spin_unlock(&mhdp->start_lock);
2479
2480         if (stop_fw)
2481                 ret = cdns_mhdp_set_firmware_active(mhdp, false);
2482
2483         phy_exit(mhdp->phy);
2484
2485         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2486                 mhdp->info->ops->exit(mhdp);
2487
2488         pm_runtime_put_sync(&pdev->dev);
2489         pm_runtime_disable(&pdev->dev);
2490
2491         cancel_work_sync(&mhdp->modeset_retry_work);
2492         flush_scheduled_work();
2493
2494         clk_disable_unprepare(mhdp->clk);
2495
2496         return ret;
2497 }
2498
2499 static const struct of_device_id mhdp_ids[] = {
2500         { .compatible = "cdns,mhdp8546", },
2501 #ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
2502         { .compatible = "ti,j721e-mhdp8546",
2503           .data = &(const struct cdns_mhdp_platform_info) {
2504                   .timings = &mhdp_ti_j721e_bridge_timings,
2505                   .ops = &mhdp_ti_j721e_ops,
2506           },
2507         },
2508 #endif
2509         { /* sentinel */ }
2510 };
2511 MODULE_DEVICE_TABLE(of, mhdp_ids);
2512
2513 static struct platform_driver mhdp_driver = {
2514         .driver = {
2515                 .name           = "cdns-mhdp8546",
2516                 .of_match_table = of_match_ptr(mhdp_ids),
2517         },
2518         .probe  = cdns_mhdp_probe,
2519         .remove = cdns_mhdp_remove,
2520 };
2521 module_platform_driver(mhdp_driver);
2522
2523 /*(DEBLOBBED)*/
2524
2525 MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
2526 MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>");
2527 MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>");
2528 MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
2529 MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
2530 MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
2531 MODULE_LICENSE("GPL");
2532 MODULE_ALIAS("platform:cdns-mhdp8546");