GNU Linux-libre 6.8.9-gnu: drivers/net/phy/nxp-c45-tja11xx.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3  * Copyright 2021-2023 NXP
4  * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5  */
6
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/phy.h>
14 #include <linux/processor.h>
15 #include <linux/property.h>
16 #include <linux/ptp_classify.h>
17 #include <linux/net_tstamp.h>
18
19 #include "nxp-c45-tja11xx.h"
20
21 #define PHY_ID_TJA_1103                 0x001BB010
22 #define PHY_ID_TJA_1120                 0x001BB031
23
24 #define VEND1_DEVICE_CONTROL            0x0040
25 #define DEVICE_CONTROL_RESET            BIT(15)
26 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
27 #define DEVICE_CONTROL_CONFIG_ALL_EN    BIT(13)
28
29 #define VEND1_DEVICE_CONFIG             0x0048
30
31 #define TJA1120_VEND1_EXT_TS_MODE       0x1012
32
33 #define TJA1120_GLOBAL_INFRA_IRQ_ACK    0x2C08
34 #define TJA1120_GLOBAL_INFRA_IRQ_EN     0x2C0A
35 #define TJA1120_GLOBAL_INFRA_IRQ_STATUS 0x2C0C
36 #define TJA1120_DEV_BOOT_DONE           BIT(1)
37
38 #define TJA1120_VEND1_PTP_TRIG_DATA_S   0x1070
39
40 #define TJA1120_EGRESS_TS_DATA_S        0x9060
41 #define TJA1120_EGRESS_TS_END           0x9067
42 #define TJA1120_TS_VALID                BIT(0)
43 #define TJA1120_MORE_TS                 BIT(15)
44
45 #define VEND1_PHY_IRQ_ACK               0x80A0
46 #define VEND1_PHY_IRQ_EN                0x80A1
47 #define VEND1_PHY_IRQ_STATUS            0x80A2
48 #define PHY_IRQ_LINK_EVENT              BIT(1)
49
50 #define VEND1_ALWAYS_ACCESSIBLE         0x801F
51 #define FUSA_PASS                       BIT(4)
52
53 #define VEND1_PHY_CONTROL               0x8100
54 #define PHY_CONFIG_EN                   BIT(14)
55 #define PHY_START_OP                    BIT(0)
56
57 #define VEND1_PHY_CONFIG                0x8108
58 #define PHY_CONFIG_AUTO                 BIT(0)
59
60 #define TJA1120_EPHY_RESETS             0x810A
61 #define EPHY_PCS_RESET                  BIT(3)
62
63 #define VEND1_SIGNAL_QUALITY            0x8320
64 #define SQI_VALID                       BIT(14)
65 #define SQI_MASK                        GENMASK(2, 0)
66 #define MAX_SQI                         SQI_MASK
67
68 #define CABLE_TEST_ENABLE               BIT(15)
69 #define CABLE_TEST_START                BIT(14)
70 #define CABLE_TEST_OK                   0x00
71 #define CABLE_TEST_SHORTED              0x01
72 #define CABLE_TEST_OPEN                 0x02
73 #define CABLE_TEST_UNKNOWN              0x07
74
75 #define VEND1_PORT_CONTROL              0x8040
76 #define PORT_CONTROL_EN                 BIT(14)
77
78 #define VEND1_PORT_ABILITIES            0x8046
79 #define MACSEC_ABILITY                  BIT(5)
80 #define PTP_ABILITY                     BIT(3)
81
82 #define VEND1_PORT_FUNC_IRQ_EN          0x807A
83 #define MACSEC_IRQS                     BIT(5)
84 #define PTP_IRQS                        BIT(3)
85
86 #define VEND1_PTP_IRQ_ACK               0x9008
87 #define EGR_TS_IRQ                      BIT(1)
88
89 #define VEND1_PORT_INFRA_CONTROL        0xAC00
90 #define PORT_INFRA_CONTROL_EN           BIT(14)
91
92 #define VEND1_RXID                      0xAFCC
93 #define VEND1_TXID                      0xAFCD
94 #define ID_ENABLE                       BIT(15)
95
96 #define VEND1_ABILITIES                 0xAFC4
97 #define RGMII_ID_ABILITY                BIT(15)
98 #define RGMII_ABILITY                   BIT(14)
99 #define RMII_ABILITY                    BIT(10)
100 #define REVMII_ABILITY                  BIT(9)
101 #define MII_ABILITY                     BIT(8)
102 #define SGMII_ABILITY                   BIT(0)
103
104 #define VEND1_MII_BASIC_CONFIG          0xAFC6
105 #define MII_BASIC_CONFIG_REV            BIT(4)
106 #define MII_BASIC_CONFIG_SGMII          0x9
107 #define MII_BASIC_CONFIG_RGMII          0x7
108 #define MII_BASIC_CONFIG_RMII           0x5
109 #define MII_BASIC_CONFIG_MII            0x4
110
111 #define VEND1_SYMBOL_ERROR_CNT_XTD      0x8351
112 #define EXTENDED_CNT_EN                 BIT(15)
113 #define VEND1_MONITOR_STATUS            0xAC80
114 #define MONITOR_RESET                   BIT(15)
115 #define VEND1_MONITOR_CONFIG            0xAC86
116 #define LOST_FRAMES_CNT_EN              BIT(9)
117 #define ALL_FRAMES_CNT_EN               BIT(8)
118
119 #define VEND1_SYMBOL_ERROR_COUNTER      0x8350
120 #define VEND1_LINK_DROP_COUNTER         0x8352
121 #define VEND1_LINK_LOSSES_AND_FAILURES  0x8353
122 #define VEND1_RX_PREAMBLE_COUNT         0xAFCE
123 #define VEND1_TX_PREAMBLE_COUNT         0xAFCF
124 #define VEND1_RX_IPG_LENGTH             0xAFD0
125 #define VEND1_TX_IPG_LENGTH             0xAFD1
126 #define COUNTER_EN                      BIT(15)
127
128 #define VEND1_PTP_CONFIG                0x1102
129 #define EXT_TRG_EDGE                    BIT(1)
130
131 #define TJA1120_SYNC_TRIG_FILTER        0x1010
132 #define PTP_TRIG_RISE_TS                BIT(3)
133 #define PTP_TRIG_FALLING_TS             BIT(2)
134
135 #define CLK_RATE_ADJ_LD                 BIT(15)
136 #define CLK_RATE_ADJ_DIR                BIT(14)
137
138 #define VEND1_RX_TS_INSRT_CTRL          0x114D
139 #define TJA1103_RX_TS_INSRT_MODE2       0x02
140
141 #define TJA1120_RX_TS_INSRT_CTRL        0x9012
142 #define TJA1120_RX_TS_INSRT_EN          BIT(15)
143 #define TJA1120_TS_INSRT_MODE           BIT(4)
144
145 #define VEND1_EGR_RING_DATA_0           0x114E
146 #define VEND1_EGR_RING_CTRL             0x1154
147
148 #define RING_DATA_0_TS_VALID            BIT(15)
149
150 #define RING_DONE                       BIT(0)
151
152 #define TS_SEC_MASK                     GENMASK(1, 0)
153
154 #define PTP_ENABLE                      BIT(3)
155 #define PHY_TEST_ENABLE                 BIT(0)
156
157 #define VEND1_PORT_PTP_CONTROL          0x9000
158 #define PORT_PTP_CONTROL_BYPASS         BIT(11)
159
160 #define PTP_CLK_PERIOD_100BT1           15ULL
161 #define PTP_CLK_PERIOD_1000BT1          8ULL
162
163 #define EVENT_MSG_FILT_ALL              0x0F
164 #define EVENT_MSG_FILT_NONE             0x00
165
166 #define VEND1_GPIO_FUNC_CONFIG_BASE     0x2C40
167 #define GPIO_FUNC_EN                    BIT(15)
168 #define GPIO_FUNC_PTP                   BIT(6)
169 #define GPIO_SIGNAL_PTP_TRIGGER         0x01
170 #define GPIO_SIGNAL_PPS_OUT             0x12
171 #define GPIO_DISABLE                    0
172 #define GPIO_PPS_OUT_CFG                (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
173         GPIO_SIGNAL_PPS_OUT)
174 #define GPIO_EXTTS_OUT_CFG              (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
175         GPIO_SIGNAL_PTP_TRIGGER)
176
177 #define RGMII_PERIOD_PS                 8000U
178 #define PS_PER_DEGREE                   div_u64(RGMII_PERIOD_PS, 360)
179 #define MIN_ID_PS                       1644U
180 #define MAX_ID_PS                       2260U
181 #define DEFAULT_ID_PS                   2000U
182
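/* Convert a frequency offset in parts per billion into the sub-nanosecond
 * increment loaded into the rate adjustment registers. The increment is
 * expressed in units of 2^-32 ns applied on every PTP clock cycle of
 * ptp_clk_period ns, i.e. roughly 2^32 * ppb * period / NSEC_PER_SEC
 * (GENMASK_ULL(31, 0) approximates 2^32). For example, with an 8 ns clock
 * period a 1000 ppb correction comes out to about 34360 sub-ns units.
 */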
183 #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
184         (ppb) * (ptp_clk_period), NSEC_PER_SEC)
185
186 #define NXP_C45_SKB_CB(skb)     ((struct nxp_c45_skb_cb *)(skb)->cb)
187
188 struct nxp_c45_phy;
189
190 struct nxp_c45_skb_cb {
191         struct ptp_header *header;
192         unsigned int type;
193 };
194
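/* Several vendor registers are accessed as bit fields whose address, MMD
 * device and position differ between the supported PHYs. A
 * struct nxp_c45_reg_field describes one such field (register, devad, bit
 * offset, width); the nxp_c45_*_reg_field() helpers below read and write it,
 * so struct nxp_c45_regmap can abstract the per-device register layout.
 */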
195 #define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size) \
196         ((struct nxp_c45_reg_field) {                   \
197                 .reg = _reg,                            \
198                 .devad =  _devad,                       \
199                 .offset = _offset,                      \
200                 .size = _size,                          \
201         })
202
203 struct nxp_c45_reg_field {
204         u16 reg;
205         u8 devad;
206         u8 offset;
207         u8 size;
208 };
209
210 struct nxp_c45_hwts {
211         u32     nsec;
212         u32     sec;
213         u8      domain_number;
214         u16     sequence_id;
215         u8      msg_type;
216 };
217
218 struct nxp_c45_regmap {
219         /* PTP config regs. */
220         u16 vend1_ptp_clk_period;
221         u16 vend1_event_msg_filt;
222
223         /* LTC bits and regs. */
224         struct nxp_c45_reg_field ltc_read;
225         struct nxp_c45_reg_field ltc_write;
226         struct nxp_c45_reg_field ltc_lock_ctrl;
227         u16 vend1_ltc_wr_nsec_0;
228         u16 vend1_ltc_wr_nsec_1;
229         u16 vend1_ltc_wr_sec_0;
230         u16 vend1_ltc_wr_sec_1;
231         u16 vend1_ltc_rd_nsec_0;
232         u16 vend1_ltc_rd_nsec_1;
233         u16 vend1_ltc_rd_sec_0;
234         u16 vend1_ltc_rd_sec_1;
235         u16 vend1_rate_adj_subns_0;
236         u16 vend1_rate_adj_subns_1;
237
238         /* External trigger reg fields. */
239         struct nxp_c45_reg_field irq_egr_ts_en;
240         struct nxp_c45_reg_field irq_egr_ts_status;
241         struct nxp_c45_reg_field domain_number;
242         struct nxp_c45_reg_field msg_type;
243         struct nxp_c45_reg_field sequence_id;
244         struct nxp_c45_reg_field sec_1_0;
245         struct nxp_c45_reg_field sec_4_2;
246         struct nxp_c45_reg_field nsec_15_0;
247         struct nxp_c45_reg_field nsec_29_16;
248
249         /* PPS and EXT Trigger bits and regs. */
250         struct nxp_c45_reg_field pps_enable;
251         struct nxp_c45_reg_field pps_polarity;
252         u16 vend1_ext_trg_data_0;
253         u16 vend1_ext_trg_data_1;
254         u16 vend1_ext_trg_data_2;
255         u16 vend1_ext_trg_data_3;
256         u16 vend1_ext_trg_ctrl;
257
258         /* Cable test reg fields. */
259         u16 cable_test;
260         struct nxp_c45_reg_field cable_test_valid;
261         struct nxp_c45_reg_field cable_test_result;
262 };
263
264 struct nxp_c45_phy_stats {
265         const char      *name;
266         const struct nxp_c45_reg_field counter;
267 };
268
269 struct nxp_c45_phy_data {
270         const struct nxp_c45_regmap *regmap;
271         const struct nxp_c45_phy_stats *stats;
272         int n_stats;
273         u8 ptp_clk_period;
274         bool ext_ts_both_edges;
275         bool ack_ptp_irq;
276         void (*counters_enable)(struct phy_device *phydev);
277         bool (*get_egressts)(struct nxp_c45_phy *priv,
278                              struct nxp_c45_hwts *hwts);
279         bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
280         void (*ptp_init)(struct phy_device *phydev);
281         void (*ptp_enable)(struct phy_device *phydev, bool enable);
282         void (*nmi_handler)(struct phy_device *phydev,
283                             irqreturn_t *irq_status);
284 };
285
286 static const
287 struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
288 {
289         return phydev->drv->driver_data;
290 }
291
292 static const
293 struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
294 {
295         const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
296
297         return phy_data->regmap;
298 }
299
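/* Field accessors: read the MMD register containing the field and mask/shift
 * the value out, or shift a new value into place with
 * phy_modify_mmd_changed(). Single-bit fields can additionally be set or
 * cleared through nxp_c45_set_reg_field()/nxp_c45_clear_reg_field(); a
 * zero-sized field is rejected with -EINVAL.
 */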
300 static int nxp_c45_read_reg_field(struct phy_device *phydev,
301                                   const struct nxp_c45_reg_field *reg_field)
302 {
303         u16 mask;
304         int ret;
305
306         if (reg_field->size == 0) {
307                 phydev_err(phydev, "Trying to read a reg field of size 0.\n");
308                 return -EINVAL;
309         }
310
311         ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
312         if (ret < 0)
313                 return ret;
314
315         mask = reg_field->size == 1 ? BIT(reg_field->offset) :
316                 GENMASK(reg_field->offset + reg_field->size - 1,
317                         reg_field->offset);
318         ret &= mask;
319         ret >>= reg_field->offset;
320
321         return ret;
322 }
323
324 static int nxp_c45_write_reg_field(struct phy_device *phydev,
325                                    const struct nxp_c45_reg_field *reg_field,
326                                    u16 val)
327 {
328         u16 mask;
329         u16 set;
330
331         if (reg_field->size == 0) {
332                 phydev_err(phydev, "Trying to write a reg field of size 0.\n");
333                 return -EINVAL;
334         }
335
336         mask = reg_field->size == 1 ? BIT(reg_field->offset) :
337                 GENMASK(reg_field->offset + reg_field->size - 1,
338                         reg_field->offset);
339         set = val << reg_field->offset;
340
341         return phy_modify_mmd_changed(phydev, reg_field->devad,
342                                       reg_field->reg, mask, set);
343 }
344
345 static int nxp_c45_set_reg_field(struct phy_device *phydev,
346                                  const struct nxp_c45_reg_field *reg_field)
347 {
348         if (reg_field->size != 1) {
349                 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
350                 return -EINVAL;
351         }
352
353         return nxp_c45_write_reg_field(phydev, reg_field, 1);
354 }
355
356 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
357                                    const struct nxp_c45_reg_field *reg_field)
358 {
359         if (reg_field->size != 1) {
360                 phydev_err(phydev, "Trying to clear a reg field of size different than 1.\n");
361                 return -EINVAL;
362         }
363
364         return nxp_c45_write_reg_field(phydev, reg_field, 0);
365 }
366
367 static bool nxp_c45_poll_txts(struct phy_device *phydev)
368 {
369         return phydev->irq <= 0;
370 }
371
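/* Read the local time counter (LTC). Setting the ltc_read field requests a
 * snapshot of the running counter; the nanoseconds and seconds parts are
 * then read back as two 16-bit halves each. nxp_c45_ptp_gettimex64() below
 * wraps this with ptp_lock held.
 */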
372 static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
373                                    struct timespec64 *ts,
374                                    struct ptp_system_timestamp *sts)
375 {
376         struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
377         const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
378
379         nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read);
380         ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
381                                    regmap->vend1_ltc_rd_nsec_0);
382         ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
383                                     regmap->vend1_ltc_rd_nsec_1) << 16;
384         ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
385                                   regmap->vend1_ltc_rd_sec_0);
386         ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
387                                    regmap->vend1_ltc_rd_sec_1) << 16;
388
389         return 0;
390 }
391
392 static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
393                                   struct timespec64 *ts,
394                                   struct ptp_system_timestamp *sts)
395 {
396         struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
397
398         mutex_lock(&priv->ptp_lock);
399         _nxp_c45_ptp_gettimex64(ptp, ts, sts);
400         mutex_unlock(&priv->ptp_lock);
401
402         return 0;
403 }
404
405 static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
406                                   const struct timespec64 *ts)
407 {
408         struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
409         const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
410
411         phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
412                       ts->tv_nsec);
413         phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
414                       ts->tv_nsec >> 16);
415         phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
416                       ts->tv_sec);
417         phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
418                       ts->tv_sec >> 16);
419         nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write);
420
421         return 0;
422 }
423
424 static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
425                                  const struct timespec64 *ts)
426 {
427         struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
428
429         mutex_lock(&priv->ptp_lock);
430         _nxp_c45_ptp_settime64(ptp, ts);
431         mutex_unlock(&priv->ptp_lock);
432
433         return 0;
434 }
435
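/* Frequency adjustment: convert the scaled ppm value from the PTP core to
 * ppb, derive the per-cycle sub-ns increment with PPM_TO_SUBNS_INC(), then
 * write its low 16 bits to rate_adj_subns_0 and the upper bits to
 * rate_adj_subns_1 together with the load strobe (CLK_RATE_ADJ_LD) and the
 * direction bit (CLK_RATE_ADJ_DIR, set for a positive adjustment).
 */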
436 static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
437 {
438         struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
439         const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
440         const struct nxp_c45_regmap *regmap = data->regmap;
441         s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
442         u64 subns_inc_val;
443         bool inc;
444
445         mutex_lock(&priv->ptp_lock);
446         inc = ppb >= 0;
447         ppb = abs(ppb);
448
449         subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);
450
451         phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
452                       regmap->vend1_rate_adj_subns_0,
453                       subns_inc_val);
454         subns_inc_val >>= 16;
455         subns_inc_val |= CLK_RATE_ADJ_LD;
456         if (inc)
457                 subns_inc_val |= CLK_RATE_ADJ_DIR;
458
459         phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
460                       regmap->vend1_rate_adj_subns_1,
461                       subns_inc_val);
462         mutex_unlock(&priv->ptp_lock);
463
464         return 0;
465 }
466
467 static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
468 {
469         struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
470         struct timespec64 now, then;
471
472         mutex_lock(&priv->ptp_lock);
473         then = ns_to_timespec64(delta);
474         _nxp_c45_ptp_gettimex64(ptp, &now, NULL);
475         now = timespec64_add(now, then);
476         _nxp_c45_ptp_settime64(ptp, &now);
477         mutex_unlock(&priv->ptp_lock);
478
479         return 0;
480 }
481
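/* Hardware timestamps carry only the two least significant bits of the
 * seconds counter (TS_SEC_MASK). Rebuild the full seconds value from the
 * current LTC time passed in *ts: if the LTC's low bits have already wrapped
 * past the hardware value, step back one wrap period (4 s), then splice the
 * hardware bits back in.
 */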
482 static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
483                                    struct nxp_c45_hwts *hwts)
484 {
485         ts->tv_nsec = hwts->nsec;
486         if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
487                 ts->tv_sec -= TS_SEC_MASK + 1;
488         ts->tv_sec &= ~TS_SEC_MASK;
489         ts->tv_sec |= hwts->sec & TS_SEC_MASK;
490 }
491
492 static bool nxp_c45_match_ts(struct ptp_header *header,
493                              struct nxp_c45_hwts *hwts,
494                              unsigned int type)
495 {
496         return ntohs(header->sequence_id) == hwts->sequence_id &&
497                ptp_get_msgtype(header, type) == hwts->msg_type &&
498                header->domain_number  == hwts->domain_number;
499 }
500
501 static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
502                               struct timespec64 *extts)
503 {
504         const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
505
506         extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
507                                       regmap->vend1_ext_trg_data_0);
508         extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
509                                        regmap->vend1_ext_trg_data_1) << 16;
510         extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
511                                      regmap->vend1_ext_trg_data_2);
512         extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
513                                       regmap->vend1_ext_trg_data_3) << 16;
514         phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
515                       regmap->vend1_ext_trg_ctrl, RING_DONE);
516
517         return true;
518 }
519
520 static bool tja1120_extts_is_valid(struct phy_device *phydev)
521 {
522         bool valid;
523         int reg;
524
525         reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
526                            TJA1120_VEND1_PTP_TRIG_DATA_S);
527         valid = !!(reg & TJA1120_TS_VALID);
528
529         return valid;
530 }
531
532 static bool tja1120_get_extts(struct nxp_c45_phy *priv,
533                               struct timespec64 *extts)
534 {
535         const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
536         struct phy_device *phydev = priv->phydev;
537         bool more_ts;
538         bool valid;
539         u16 reg;
540
541         reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
542                            regmap->vend1_ext_trg_ctrl);
543         more_ts = !!(reg & TJA1120_MORE_TS);
544
545         valid = tja1120_extts_is_valid(phydev);
546         if (!valid) {
547                 if (!more_ts)
548                         goto tja1120_get_extts_out;
549
550                 /* Bug workaround for TJA1120 engineering samples: move the new
551                  * timestamp from the FIFO to the buffer.
552                  */
553                 phy_write_mmd(phydev, MDIO_MMD_VEND1,
554                               regmap->vend1_ext_trg_ctrl, RING_DONE);
555                 valid = tja1120_extts_is_valid(phydev);
556                 if (!valid)
557                         goto tja1120_get_extts_out;
558         }
559
560         nxp_c45_get_extts(priv, extts);
561 tja1120_get_extts_out:
562         return valid;
563 }
564
565 static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
566                                    struct nxp_c45_hwts *hwts)
567 {
568         const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
569         struct phy_device *phydev = priv->phydev;
570
571         hwts->domain_number =
572                 nxp_c45_read_reg_field(phydev, &regmap->domain_number);
573         hwts->msg_type =
574                 nxp_c45_read_reg_field(phydev, &regmap->msg_type);
575         hwts->sequence_id =
576                 nxp_c45_read_reg_field(phydev, &regmap->sequence_id);
577         hwts->nsec =
578                 nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0);
579         hwts->nsec |=
580                 nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16;
581         hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0);
582         hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2;
583 }
584
585 static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
586                                struct nxp_c45_hwts *hwts)
587 {
588         bool valid;
589         u16 reg;
590
591         mutex_lock(&priv->ptp_lock);
592         phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
593                       RING_DONE);
594         reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
595         valid = !!(reg & RING_DATA_0_TS_VALID);
596         if (!valid)
597                 goto nxp_c45_get_hwtxts_out;
598
599         nxp_c45_read_egress_ts(priv, hwts);
600 nxp_c45_get_hwtxts_out:
601         mutex_unlock(&priv->ptp_lock);
602         return valid;
603 }
604
605 static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
606 {
607         bool valid;
608         u16 reg;
609
610         reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
611         valid = !!(reg & TJA1120_TS_VALID);
612
613         return valid;
614 }
615
616 static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
617                                struct nxp_c45_hwts *hwts)
618 {
619         struct phy_device *phydev = priv->phydev;
620         bool more_ts;
621         bool valid;
622         u16 reg;
623
624         mutex_lock(&priv->ptp_lock);
625         reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
626         more_ts = !!(reg & TJA1120_MORE_TS);
627         valid = tja1120_egress_ts_is_valid(phydev);
628         if (!valid) {
629                 if (!more_ts)
630                         goto tja1120_get_hwtxts_out;
631
632                 /* Bug workaround for TJA1120 engineering samples: move the
633                  * new timestamp from the FIFO to the buffer.
634                  */
635                 phy_write_mmd(phydev, MDIO_MMD_VEND1,
636                               TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
637                 valid = tja1120_egress_ts_is_valid(phydev);
638                 if (!valid)
639                         goto tja1120_get_hwtxts_out;
640         }
641         nxp_c45_read_egress_ts(priv, hwts);
642         phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
643                            TJA1120_TS_VALID);
644 tja1120_get_hwtxts_out:
645         mutex_unlock(&priv->ptp_lock);
646         return valid;
647 }
648
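/* Match a retrieved egress timestamp against the queued TX skbs using the
 * PTP sequence id, message type and domain number, reconstruct the full
 * time from the current LTC and complete the timestamp with
 * skb_complete_tx_timestamp(). An unmatched timestamp only triggers a
 * warning.
 */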
649 static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
650                                  struct nxp_c45_hwts *txts)
651 {
652         struct sk_buff *skb, *tmp, *skb_match = NULL;
653         struct skb_shared_hwtstamps shhwtstamps;
654         struct timespec64 ts;
655         unsigned long flags;
656         bool ts_match;
657         s64 ts_ns;
658
659         spin_lock_irqsave(&priv->tx_queue.lock, flags);
660         skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
661                 ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
662                                             NXP_C45_SKB_CB(skb)->type);
663                 if (!ts_match)
664                         continue;
665                 skb_match = skb;
666                 __skb_unlink(skb, &priv->tx_queue);
667                 break;
668         }
669         spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
670
671         if (skb_match) {
672                 nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
673                 nxp_c45_reconstruct_ts(&ts, txts);
674                 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
675                 ts_ns = timespec64_to_ns(&ts);
676                 shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
677                 skb_complete_tx_timestamp(skb_match, &shhwtstamps);
678         } else {
679                 phydev_warn(priv->phydev,
680                             "the tx timestamp doesn't match any skb\n");
681         }
682 }
683
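/* PTP auxiliary worker. When the PHY has no interrupt line, drain the egress
 * timestamp FIFO for the skbs waiting in tx_queue. RX skbs are always
 * handled here: the 2-bit seconds + 30-bit nanoseconds snapshot found in the
 * reserved2 field of the PTP header is expanded against the current LTC
 * time. While an external trigger is enabled the worker also polls for new
 * extts events and keeps rescheduling itself. Returns 1 to run again after
 * one jiffy, -1 otherwise.
 */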
684 static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
685 {
686         struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
687         const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
688         bool poll_txts = nxp_c45_poll_txts(priv->phydev);
689         struct skb_shared_hwtstamps *shhwtstamps_rx;
690         struct ptp_clock_event event;
691         struct nxp_c45_hwts hwts;
692         bool reschedule = false;
693         struct timespec64 ts;
694         struct sk_buff *skb;
695         bool ts_valid;
696         u32 ts_raw;
697
698         while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
699                 ts_valid = data->get_egressts(priv, &hwts);
700                 if (unlikely(!ts_valid)) {
701                         /* Still more skbs in the queue */
702                         reschedule = true;
703                         break;
704                 }
705
706                 nxp_c45_process_txts(priv, &hwts);
707         }
708
709         while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
710                 nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
711                 ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
712                 hwts.sec = ts_raw >> 30;
713                 hwts.nsec = ts_raw & GENMASK(29, 0);
714                 nxp_c45_reconstruct_ts(&ts, &hwts);
715                 shhwtstamps_rx = skb_hwtstamps(skb);
716                 shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
717                 NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
718                 netif_rx(skb);
719         }
720
721         if (priv->extts) {
722                 ts_valid = data->get_extts(priv, &ts);
723                 if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
724                         priv->extts_ts = ts;
725                         event.index = priv->extts_index;
726                         event.type = PTP_CLOCK_EXTTS;
727                         event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
728                         ptp_clock_event(priv->ptp_clock, &event);
729                 }
730                 reschedule = true;
731         }
732
733         return reschedule ? 1 : -1;
734 }
735
736 static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
737                                 int pin, u16 pin_cfg)
738 {
739         struct phy_device *phydev = priv->phydev;
740
741         phy_write_mmd(phydev, MDIO_MMD_VEND1,
742                       VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
743 }
744
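/* Periodic output is limited to the fixed 1 Hz PPS generated on the seconds
 * rollover: the period must be exactly 1 s, the start time must be 0 and the
 * only accepted flag is PTP_PEROUT_PHASE with a phase of either 0 or 500 ms,
 * which selects the PPS polarity. The requested GPIO pin is muxed to the PPS
 * function while the output is enabled.
 */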
745 static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
746                                  struct ptp_perout_request *perout, int on)
747 {
748         const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
749         struct phy_device *phydev = priv->phydev;
750         int pin;
751
752         if (perout->flags & ~PTP_PEROUT_PHASE)
753                 return -EOPNOTSUPP;
754
755         pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
756         if (pin < 0)
757                 return pin;
758
759         if (!on) {
760                 nxp_c45_clear_reg_field(priv->phydev,
761                                         &regmap->pps_enable);
762                 nxp_c45_clear_reg_field(priv->phydev,
763                                         &regmap->pps_polarity);
764
765                 nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
766
767                 return 0;
768         }
769
770         /* The PPS signal is fixed to 1 second and is always generated when the
771          * seconds counter is incremented. The start time is not configurable.
772          * If the clock is adjusted, the PPS signal is automatically readjusted.
773          */
774         if (perout->period.sec != 1 || perout->period.nsec != 0) {
775                 phydev_warn(phydev, "The period can be set only to 1 second.");
776                 return -EINVAL;
777         }
778
779         if (!(perout->flags & PTP_PEROUT_PHASE)) {
780                 if (perout->start.sec != 0 || perout->start.nsec != 0) {
781                         phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
782                         return -EINVAL;
783                 }
784         } else {
785                 if (perout->phase.nsec != 0 &&
786                     perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
787                         phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
788                         return -EINVAL;
789                 }
790
791                 if (perout->phase.nsec == 0)
792                         nxp_c45_clear_reg_field(priv->phydev,
793                                                 &regmap->pps_polarity);
794                 else
795                         nxp_c45_set_reg_field(priv->phydev,
796                                               &regmap->pps_polarity);
797         }
798
799         nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);
800
801         nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable);
802
803         return 0;
804 }
805
806 static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
807                                           struct ptp_extts_request *extts)
808 {
809         if (extts->flags & PTP_RISING_EDGE)
810                 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
811                                    VEND1_PTP_CONFIG, EXT_TRG_EDGE);
812
813         if (extts->flags & PTP_FALLING_EDGE)
814                 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
815                                  VEND1_PTP_CONFIG, EXT_TRG_EDGE);
816 }
817
818 static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
819                                            struct ptp_extts_request *extts)
820 {
821         /* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
822          * this case, the external timestamp is enabled on the rising edge.
823          */
824         if (extts->flags & PTP_RISING_EDGE ||
825             extts->flags == PTP_ENABLE_FEATURE)
826                 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
827                                  TJA1120_SYNC_TRIG_FILTER,
828                                  PTP_TRIG_RISE_TS);
829         else
830                 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
831                                    TJA1120_SYNC_TRIG_FILTER,
832                                    PTP_TRIG_RISE_TS);
833
834         if (extts->flags & PTP_FALLING_EDGE)
835                 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
836                                  TJA1120_SYNC_TRIG_FILTER,
837                                  PTP_TRIG_FALLING_TS);
838         else
839                 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
840                                    TJA1120_SYNC_TRIG_FILTER,
841                                    PTP_TRIG_FALLING_TS);
842 }
843
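/* External timestamping on a GPIO pin. Sampling on both edges at once is
 * only allowed when the device data sets ext_ts_both_edges; otherwise the
 * edge is selected through the EXT_TRG_EDGE bit and requesting both edges is
 * rejected. There is no dedicated extts interrupt, so events are picked up
 * by polling from the aux worker, which is kicked here.
 */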
844 static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
845                                 struct ptp_extts_request *extts, int on)
846 {
847         const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
848         int pin;
849
850         if (extts->flags & ~(PTP_ENABLE_FEATURE |
851                               PTP_RISING_EDGE |
852                               PTP_FALLING_EDGE |
853                               PTP_STRICT_FLAGS))
854                 return -EOPNOTSUPP;
855
856         /* Sampling on both edges is not supported */
857         if ((extts->flags & PTP_RISING_EDGE) &&
858             (extts->flags & PTP_FALLING_EDGE) &&
859             !data->ext_ts_both_edges)
860                 return -EOPNOTSUPP;
861
862         pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
863         if (pin < 0)
864                 return pin;
865
866         if (!on) {
867                 nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
868                 priv->extts = false;
869
870                 return 0;
871         }
872
873         if (data->ext_ts_both_edges)
874                 nxp_c45_set_rising_and_falling(priv->phydev, extts);
875         else
876                 nxp_c45_set_rising_or_falling(priv->phydev, extts);
877
878         nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
879         priv->extts = true;
880         priv->extts_index = extts->index;
881         ptp_schedule_worker(priv->ptp_clock, 0);
882
883         return 0;
884 }
885
886 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
887                               struct ptp_clock_request *req, int on)
888 {
889         struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
890
891         switch (req->type) {
892         case PTP_CLK_REQ_EXTTS:
893                 return nxp_c45_extts_enable(priv, &req->extts, on);
894         case PTP_CLK_REQ_PEROUT:
895                 return nxp_c45_perout_enable(priv, &req->perout, on);
896         default:
897                 return -EOPNOTSUPP;
898         }
899 }
900
901 static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
902         { "nxp_c45_gpio0", 0, PTP_PF_NONE},
903         { "nxp_c45_gpio1", 1, PTP_PF_NONE},
904         { "nxp_c45_gpio2", 2, PTP_PF_NONE},
905         { "nxp_c45_gpio3", 3, PTP_PF_NONE},
906         { "nxp_c45_gpio4", 4, PTP_PF_NONE},
907         { "nxp_c45_gpio5", 5, PTP_PF_NONE},
908         { "nxp_c45_gpio6", 6, PTP_PF_NONE},
909         { "nxp_c45_gpio7", 7, PTP_PF_NONE},
910         { "nxp_c45_gpio8", 8, PTP_PF_NONE},
911         { "nxp_c45_gpio9", 9, PTP_PF_NONE},
912         { "nxp_c45_gpio10", 10, PTP_PF_NONE},
913         { "nxp_c45_gpio11", 11, PTP_PF_NONE},
914 };
915
916 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
917                                   enum ptp_pin_function func, unsigned int chan)
918 {
919         if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
920                 return -EINVAL;
921
922         switch (func) {
923         case PTP_PF_NONE:
924         case PTP_PF_PEROUT:
925         case PTP_PF_EXTTS:
926                 break;
927         default:
928                 return -EOPNOTSUPP;
929         }
930
931         return 0;
932 }
933
934 static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
935 {
936         priv->caps = (struct ptp_clock_info) {
937                 .owner          = THIS_MODULE,
938                 .name           = "NXP C45 PHC",
939                 .max_adj        = 16666666,
940                 .adjfine        = nxp_c45_ptp_adjfine,
941                 .adjtime        = nxp_c45_ptp_adjtime,
942                 .gettimex64     = nxp_c45_ptp_gettimex64,
943                 .settime64      = nxp_c45_ptp_settime64,
944                 .enable         = nxp_c45_ptp_enable,
945                 .verify         = nxp_c45_ptp_verify_pin,
946                 .do_aux_work    = nxp_c45_do_aux_work,
947                 .pin_config     = nxp_c45_ptp_pins,
948                 .n_pins         = ARRAY_SIZE(nxp_c45_ptp_pins),
949                 .n_ext_ts       = 1,
950                 .n_per_out      = 1,
951         };
952
953         priv->ptp_clock = ptp_clock_register(&priv->caps,
954                                              &priv->phydev->mdio.dev);
955
956         if (IS_ERR(priv->ptp_clock))
957                 return PTR_ERR(priv->ptp_clock);
958
959         if (!priv->ptp_clock)
960                 return -ENOMEM;
961
962         return 0;
963 }
964
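/* mii_timestamper TX hook. With TX timestamping enabled, remember the parsed
 * PTP header and packet type in the skb control block, queue the skb and, on
 * interrupt-less setups, kick the aux worker to poll for the egress
 * timestamp; otherwise the skb is freed. Received PTP packets are queued for
 * the aux worker by nxp_c45_rxtstamp() below.
 */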
965 static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
966                              struct sk_buff *skb, int type)
967 {
968         struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
969                                                 mii_ts);
970
971         switch (priv->hwts_tx) {
972         case HWTSTAMP_TX_ON:
973                 NXP_C45_SKB_CB(skb)->type = type;
974                 NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
975                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
976                 skb_queue_tail(&priv->tx_queue, skb);
977                 if (nxp_c45_poll_txts(priv->phydev))
978                         ptp_schedule_worker(priv->ptp_clock, 0);
979                 break;
980         case HWTSTAMP_TX_OFF:
981         default:
982                 kfree_skb(skb);
983                 break;
984         }
985 }
986
987 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
988                              struct sk_buff *skb, int type)
989 {
990         struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
991                                                 mii_ts);
992         struct ptp_header *header = ptp_parse_header(skb, type);
993
994         if (!header)
995                 return false;
996
997         if (!priv->hwts_rx)
998                 return false;
999
1000         NXP_C45_SKB_CB(skb)->header = header;
1001         skb_queue_tail(&priv->rx_queue, skb);
1002         ptp_schedule_worker(priv->ptp_clock, 0);
1003
1004         return true;
1005 }
1006
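/* Hardware timestamping configuration callback. TX timestamping is either
 * off or on, and RX filtering only supports PTPv2 over L2 event messages
 * (more specific filters are widened to HWTSTAMP_FILTER_PTP_V2_L2_EVENT).
 * Enabling either direction programs the event message filter and the PTP
 * block; the egress timestamp interrupt is only (un)armed when the PHY has a
 * real interrupt line, otherwise timestamps are polled from the aux worker.
 */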
1007 static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
1008                             struct kernel_hwtstamp_config *cfg,
1009                             struct netlink_ext_ack *extack)
1010 {
1011         struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1012                                                 mii_ts);
1013         struct phy_device *phydev = priv->phydev;
1014         const struct nxp_c45_phy_data *data;
1015
1016         if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
1017                 return -ERANGE;
1018
1019         data = nxp_c45_get_data(phydev);
1020         priv->hwts_tx = cfg->tx_type;
1021
1022         switch (cfg->rx_filter) {
1023         case HWTSTAMP_FILTER_NONE:
1024                 priv->hwts_rx = 0;
1025                 break;
1026         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1027         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1028         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1029                 priv->hwts_rx = 1;
1030                 cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1031                 break;
1032         default:
1033                 return -ERANGE;
1034         }
1035
1036         if (priv->hwts_rx || priv->hwts_tx) {
1037                 phy_write_mmd(phydev, MDIO_MMD_VEND1,
1038                               data->regmap->vend1_event_msg_filt,
1039                               EVENT_MSG_FILT_ALL);
1040                 data->ptp_enable(phydev, true);
1041         } else {
1042                 phy_write_mmd(phydev, MDIO_MMD_VEND1,
1043                               data->regmap->vend1_event_msg_filt,
1044                               EVENT_MSG_FILT_NONE);
1045                 data->ptp_enable(phydev, false);
1046         }
1047
1048         if (nxp_c45_poll_txts(priv->phydev))
1049                 goto nxp_c45_no_ptp_irq;
1050
1051         if (priv->hwts_tx)
1052                 nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
1053         else
1054                 nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);
1055
1056 nxp_c45_no_ptp_irq:
1057         return 0;
1058 }
1059
1060 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
1061                            struct ethtool_ts_info *ts_info)
1062 {
1063         struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1064                                                 mii_ts);
1065
1066         ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1067                         SOF_TIMESTAMPING_RX_HARDWARE |
1068                         SOF_TIMESTAMPING_RAW_HARDWARE;
1069         ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
1070         ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1071         ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1072                         (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1073                         (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1074                         (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
1075
1076         return 0;
1077 }
1078
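/* ethtool statistics. Each entry names a counter and the register field it
 * lives in; the raw addresses correspond to the VEND1_* counter defines
 * above (0x8350 symbol errors, 0x8352 link drops, 0x8353 link losses and
 * failures, 0xAFCE..0xAFD1 preamble and IPG counters). Values are read on
 * demand by nxp_c45_get_stats() and reported as U64_MAX on read error.
 */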
1079 static const struct nxp_c45_phy_stats common_hw_stats[] = {
1080         { "phy_link_status_drop_cnt",
1081                 NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
1082         { "phy_link_availability_drop_cnt",
1083                 NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
1084         { "phy_link_loss_cnt",
1085                 NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
1086         { "phy_link_failure_cnt",
1087                 NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
1088         { "phy_symbol_error_cnt",
1089                 NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
1090 };
1091
1092 static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
1093         { "rx_preamble_count",
1094                 NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
1095         { "tx_preamble_count",
1096                 NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
1097         { "rx_ipg_length",
1098                 NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
1099         { "tx_ipg_length",
1100                 NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
1101 };
1102
1103 static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
1104         { "phy_symbol_error_cnt_ext",
1105                 NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
1106         { "tx_frames_xtd",
1107                 NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
1108         { "tx_frames",
1109                 NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
1110         { "rx_frames_xtd",
1111                 NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
1112         { "rx_frames",
1113                 NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
1114         { "tx_lost_frames_xtd",
1115                 NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
1116         { "tx_lost_frames",
1117                 NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
1118         { "rx_lost_frames_xtd",
1119                 NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
1120         { "rx_lost_frames",
1121                 NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
1122 };
1123
1124 static int nxp_c45_get_sset_count(struct phy_device *phydev)
1125 {
1126         const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1127
1128         return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1129 }
1130
1131 static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
1132 {
1133         const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1134         size_t count = nxp_c45_get_sset_count(phydev);
1135         size_t idx;
1136         size_t i;
1137
1138         for (i = 0; i < count; i++) {
1139                 if (i < ARRAY_SIZE(common_hw_stats)) {
1140                         strscpy(data + i * ETH_GSTRING_LEN,
1141                                 common_hw_stats[i].name, ETH_GSTRING_LEN);
1142                         continue;
1143                 }
1144                 idx = i - ARRAY_SIZE(common_hw_stats);
1145                 strscpy(data + i * ETH_GSTRING_LEN,
1146                         phy_data->stats[idx].name, ETH_GSTRING_LEN);
1147         }
1148 }
1149
1150 static void nxp_c45_get_stats(struct phy_device *phydev,
1151                               struct ethtool_stats *stats, u64 *data)
1152 {
1153         const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1154         size_t count = nxp_c45_get_sset_count(phydev);
1155         const struct nxp_c45_reg_field *reg_field;
1156         size_t idx;
1157         size_t i;
1158         int ret;
1159
1160         for (i = 0; i < count; i++) {
1161                 if (i < ARRAY_SIZE(common_hw_stats)) {
1162                         reg_field = &common_hw_stats[i].counter;
1163                 } else {
1164                         idx = i - ARRAY_SIZE(common_hw_stats);
1165                         reg_field = &phy_data->stats[idx].counter;
1166                 }
1167
1168                 ret = nxp_c45_read_reg_field(phydev, reg_field);
1169                 if (ret < 0)
1170                         data[i] = U64_MAX;
1171                 else
1172                         data[i] = ret;
1173         }
1174 }
1175
1176 static int nxp_c45_config_enable(struct phy_device *phydev)
1177 {
1178         phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
1179                       DEVICE_CONTROL_CONFIG_GLOBAL_EN |
1180                       DEVICE_CONTROL_CONFIG_ALL_EN);
1181         usleep_range(400, 450);
1182
1183         phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
1184                       PORT_CONTROL_EN);
1185         phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
1186                       PHY_CONFIG_EN);
1187         phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
1188                       PORT_INFRA_CONTROL_EN);
1189
1190         return 0;
1191 }
1192
1193 static int nxp_c45_start_op(struct phy_device *phydev)
1194 {
1195         return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
1196                                 PHY_START_OP);
1197 }
1198
1199 static int nxp_c45_config_intr(struct phy_device *phydev)
1200 {
1201         int ret;
1202
1203         if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1204                 ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1205                                        VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1206                 if (ret)
1207                         return ret;
1208
1209                 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1210                                         VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1211         }
1212
1213         ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1214                                  VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1215         if (ret)
1216                 return ret;
1217
1218         return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1219                                   VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1220 }
1221
1222 static int tja1103_config_intr(struct phy_device *phydev)
1223 {
1224         int ret;
1225
1226         /* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
1227         ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
1228                             FUSA_PASS);
1229         if (ret)
1230                 return ret;
1231
1232         return nxp_c45_config_intr(phydev);
1233 }
1234
1235 static int tja1120_config_intr(struct phy_device *phydev)
1236 {
1237         int ret;
1238
1239         if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1240                 ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1241                                        TJA1120_GLOBAL_INFRA_IRQ_EN,
1242                                        TJA1120_DEV_BOOT_DONE);
1243         else
1244                 ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1245                                          TJA1120_GLOBAL_INFRA_IRQ_EN,
1246                                          TJA1120_DEV_BOOT_DONE);
1247         if (ret)
1248                 return ret;
1249
1250         return nxp_c45_config_intr(phydev);
1251 }
1252
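/* Shared interrupt handler: acknowledge and forward link events to the PHY
 * state machine, drain all pending egress timestamps (acking the IRQ first
 * on devices where it is not self-clearing), then let the device-specific
 * NMI handler and the MACsec code claim the remaining interrupt sources.
 */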
1253 static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
1254 {
1255         const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
1256         struct nxp_c45_phy *priv = phydev->priv;
1257         irqreturn_t ret = IRQ_NONE;
1258         struct nxp_c45_hwts hwts;
1259         int irq;
1260
1261         irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
1262         if (irq & PHY_IRQ_LINK_EVENT) {
1263                 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
1264                               PHY_IRQ_LINK_EVENT);
1265                 phy_trigger_machine(phydev);
1266                 ret = IRQ_HANDLED;
1267         }
1268
1269         irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
1270         if (irq) {
1271                 /* If ack_ptp_irq is false, the IRQ bit is self-clearing and
1272                  * will be cleared when the EGR TS FIFO is empty. Otherwise,
1273                  * the IRQ bit should be cleared before reading the timestamp.
1274                  */
1275                 if (data->ack_ptp_irq)
1276                         phy_write_mmd(phydev, MDIO_MMD_VEND1,
1277                                       VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
1278                 while (data->get_egressts(priv, &hwts))
1279                         nxp_c45_process_txts(priv, &hwts);
1280
1281                 ret = IRQ_HANDLED;
1282         }
1283
1284         data->nmi_handler(phydev, &ret);
1285         nxp_c45_handle_macsec_interrupt(phydev, &ret);
1286
1287         return ret;
1288 }
1289
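/* Trigger a full device reset and wait for the RESET bit in
 * VEND1_DEVICE_CONTROL to self-clear, polling every 20 ms for up to 240 ms.
 */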
1290 static int nxp_c45_soft_reset(struct phy_device *phydev)
1291 {
1292         int ret;
1293
1294         ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
1295                             DEVICE_CONTROL_RESET);
1296         if (ret)
1297                 return ret;
1298
1299         return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
1300                                          VEND1_DEVICE_CONTROL, ret,
1301                                          !(ret & DEVICE_CONTROL_RESET), 20000,
1302                                          240000, false);
1303 }
1304
1305 static int nxp_c45_cable_test_start(struct phy_device *phydev)
1306 {
1307         const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1308
1309         phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1310                          VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1311         return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1312                                 CABLE_TEST_ENABLE | CABLE_TEST_START);
1313 }
1314
1315 static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
1316                                          bool *finished)
1317 {
1318         const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1319         int ret;
1320         u8 cable_test_result;
1321
1322         ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
1323         if (!ret) {
1324                 *finished = false;
1325                 return 0;
1326         }
1327
1328         *finished = true;
1329         cable_test_result = nxp_c45_read_reg_field(phydev,
1330                                                    &regmap->cable_test_result);
1331
1332         switch (cable_test_result) {
1333         case CABLE_TEST_OK:
1334                 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1335                                         ETHTOOL_A_CABLE_RESULT_CODE_OK);
1336                 break;
1337         case CABLE_TEST_SHORTED:
1338                 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1339                                         ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
1340                 break;
1341         case CABLE_TEST_OPEN:
1342                 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1343                                         ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
1344                 break;
1345         default:
1346                 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1347                                         ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
1348         }
1349
1350         phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1351                            CABLE_TEST_ENABLE);
1352         phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1353                            VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1354
1355         return nxp_c45_start_op(phydev);
1356 }
1357
1358 static int nxp_c45_get_sqi(struct phy_device *phydev)
1359 {
1360         int reg;
1361
1362         reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1363         if (!(reg & SQI_VALID))
1364                 return -EINVAL;
1365
1366         reg &= SQI_MASK;
1367
1368         return reg;
1369 }
1370
1371 static void tja1120_link_change_notify(struct phy_device *phydev)
1372 {
1373         /* Bug workaround for TJA1120 engineering samples: fix egress
1374          * timestamps lost after link recovery.
1375          */
1376         if (phydev->state == PHY_NOLINK) {
1377                 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1378                                  TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
1379                 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1380                                    TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
1381         }
1382 }
1383
1384 static int nxp_c45_get_sqi_max(struct phy_device *phydev)
1385 {
1386         return MAX_SQI;
1387 }
1388
1389 static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
1390 {
1391         if (delay < MIN_ID_PS) {
1392                 phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
1393                 return -EINVAL;
1394         }
1395
1396         if (delay > MAX_ID_PS) {
1397                 phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1398                 return -EINVAL;
1399         }
1400
1401         return 0;
1402 }
1403
1404 static void nxp_c45_counters_enable(struct phy_device *phydev)
1405 {
1406         const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
1407
1408         phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
1409                          COUNTER_EN);
1410
1411         data->counters_enable(phydev);
1412 }
1413
1414 static void nxp_c45_ptp_init(struct phy_device *phydev)
1415 {
1416         const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
1417
1418         phy_write_mmd(phydev, MDIO_MMD_VEND1,
1419                       data->regmap->vend1_ptp_clk_period,
1420                       data->ptp_clk_period);
1421         nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);
1422
1423         data->ptp_init(phydev);
1424 }
1425
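/* Convert an internal delay expressed in degrees of the 8000 ps RGMII clock
 * period into the raw phase offset written to VEND1_TXID/VEND1_RXID,
 * inverting "delay = 73.8 + raw * 0.9" degrees in fixed point. For example,
 * a 2000 ps delay is roughly 90 degrees (2000 / 22 ps per degree) and maps
 * to a raw value of (900 - 738) / 9 = 18.
 */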
1426 static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
1427 {
1428         /* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
1429          * To avoid floating point operations we'll multiply by 10
1430          * and get 1 decimal point precision.
1431          */
1432         phase_offset_raw *= 10;
1433         phase_offset_raw -= 738;
1434         return div_u64(phase_offset_raw, 9);
1435 }
1436
1437 static void nxp_c45_disable_delays(struct phy_device *phydev)
1438 {
1439         phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
1440         phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
1441 }
1442
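     /* Apply the RGMII internal delays: the delay requested in picoseconds is
      * converted to degrees of phase (delay / PS_PER_DEGREE) and then to the
      * raw register encoding via nxp_c45_get_phase_shift().
      */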
1443 static void nxp_c45_set_delays(struct phy_device *phydev)
1444 {
1445         struct nxp_c45_phy *priv = phydev->priv;
1446         u64 tx_delay = priv->tx_delay;
1447         u64 rx_delay = priv->rx_delay;
1448         u64 degree;
1449
1450         if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1451             phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1452                 degree = div_u64(tx_delay, PS_PER_DEGREE);
1453                 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1454                               ID_ENABLE | nxp_c45_get_phase_shift(degree));
1455         } else {
1456                 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1457                                    ID_ENABLE);
1458         }
1459
1460         if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1461             phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1462                 degree = div_u64(rx_delay, PS_PER_DEGREE);
1463                 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1464                               ID_ENABLE | nxp_c45_get_phase_shift(degree));
1465         } else {
1466                 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1467                                    ID_ENABLE);
1468         }
1469 }
1470
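     /* Read the "tx-internal-delay-ps"/"rx-internal-delay-ps" properties for
      * the RGMII ID modes, falling back to DEFAULT_ID_PS. A minimal, purely
      * illustrative device tree fragment (node name and delay values assumed):
      *
      *     ethernet-phy@1 {
      *             reg = <1>;
      *             tx-internal-delay-ps = <2000>;
      *             rx-internal-delay-ps = <2000>;
      *     };
      */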
1471 static int nxp_c45_get_delays(struct phy_device *phydev)
1472 {
1473         struct nxp_c45_phy *priv = phydev->priv;
1474         int ret;
1475
1476         if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1477             phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1478                 ret = device_property_read_u32(&phydev->mdio.dev,
1479                                                "tx-internal-delay-ps",
1480                                                &priv->tx_delay);
1481                 if (ret)
1482                         priv->tx_delay = DEFAULT_ID_PS;
1483
1484                 ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1485                 if (ret) {
1486                         phydev_err(phydev,
1487                                    "tx-internal-delay-ps invalid value\n");
1488                         return ret;
1489                 }
1490         }
1491
1492         if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1493             phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1494                 ret = device_property_read_u32(&phydev->mdio.dev,
1495                                                "rx-internal-delay-ps",
1496                                                &priv->rx_delay);
1497                 if (ret)
1498                         priv->rx_delay = DEFAULT_ID_PS;
1499
1500                 ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1501                 if (ret) {
1502                         phydev_err(phydev,
1503                                    "rx-internal-delay-ps invalid value\n");
1504                         return ret;
1505                 }
1506         }
1507
1508         return 0;
1509 }
1510
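     /* Check the requested MII mode against the VEND1_ABILITIES register and
      * program VEND1_MII_BASIC_CONFIG accordingly; the RGMII-ID variants also
      * read and apply the internal delay configuration.
      */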
1511 static int nxp_c45_set_phy_mode(struct phy_device *phydev)
1512 {
1513         int ret;
1514
1515         ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
1516         phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);
1517
1518         switch (phydev->interface) {
1519         case PHY_INTERFACE_MODE_RGMII:
1520                 if (!(ret & RGMII_ABILITY)) {
1521                         phydev_err(phydev, "rgmii mode not supported\n");
1522                         return -EINVAL;
1523                 }
1524                 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1525                               MII_BASIC_CONFIG_RGMII);
1526                 nxp_c45_disable_delays(phydev);
1527                 break;
1528         case PHY_INTERFACE_MODE_RGMII_ID:
1529         case PHY_INTERFACE_MODE_RGMII_TXID:
1530         case PHY_INTERFACE_MODE_RGMII_RXID:
1531                 if (!(ret & RGMII_ID_ABILITY)) {
1532                         phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
1533                         return -EINVAL;
1534                 }
1535                 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1536                               MII_BASIC_CONFIG_RGMII);
1537                 ret = nxp_c45_get_delays(phydev);
1538                 if (ret)
1539                         return ret;
1540
1541                 nxp_c45_set_delays(phydev);
1542                 break;
1543         case PHY_INTERFACE_MODE_MII:
1544                 if (!(ret & MII_ABILITY)) {
1545                         phydev_err(phydev, "mii mode not supported\n");
1546                         return -EINVAL;
1547                 }
1548                 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1549                               MII_BASIC_CONFIG_MII);
1550                 break;
1551         case PHY_INTERFACE_MODE_REVMII:
1552                 if (!(ret & REVMII_ABILITY)) {
1553                         phydev_err(phydev, "rev-mii mode not supported\n");
1554                         return -EINVAL;
1555                 }
1556                 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1557                               MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
1558                 break;
1559         case PHY_INTERFACE_MODE_RMII:
1560                 if (!(ret & RMII_ABILITY)) {
1561                         phydev_err(phydev, "rmii mode not supported\n");
1562                         return -EINVAL;
1563                 }
1564                 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1565                               MII_BASIC_CONFIG_RMII);
1566                 break;
1567         case PHY_INTERFACE_MODE_SGMII:
1568                 if (!(ret & SGMII_ABILITY)) {
1569                         phydev_err(phydev, "sgmii mode not supported\n");
1570                         return -EINVAL;
1571                 }
1572                 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1573                               MII_BASIC_CONFIG_SGMII);
1574                 break;
1575         case PHY_INTERFACE_MODE_INTERNAL:
1576                 break;
1577         default:
1578                 return -EINVAL;
1579         }
1580
1581         return 0;
1582 }
1583
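     /* Enable the configuration registers, apply the SJA1110 rev B write-access
      * workaround, select the MII mode and delays, then enable the counters,
      * the PTP block and MACsec before starting the PHY.
      */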
1584 static int nxp_c45_config_init(struct phy_device *phydev)
1585 {
1586         int ret;
1587
1588         ret = nxp_c45_config_enable(phydev);
1589         if (ret) {
1590                 phydev_err(phydev, "Failed to enable config\n");
1591                 return ret;
1592         }
1593
1594         /* Bug workaround for SJA1110 rev B: enable write access
1595          * to MDIO_MMD_PMAPMD
1596          */
1597         phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
1598         phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
1599
1600         phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
1601                          PHY_CONFIG_AUTO);
1602
1603         ret = nxp_c45_set_phy_mode(phydev);
1604         if (ret)
1605                 return ret;
1606
1607         phydev->autoneg = AUTONEG_DISABLE;
1608
1609         nxp_c45_counters_enable(phydev);
1610         nxp_c45_ptp_init(phydev);
1611         ret = nxp_c45_macsec_config_init(phydev);
1612         if (ret)
1613                 return ret;
1614
1615         return nxp_c45_start_op(phydev);
1616 }
1617
1618 static int nxp_c45_get_features(struct phy_device *phydev)
1619 {
1620         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
1621         linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);
1622
1623         return genphy_c45_pma_read_abilities(phydev);
1624 }
1625
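     /* Allocate the driver state and, based on the abilities advertised in
      * VEND1_PORT_ABILITIES, register the PHY timestamping callbacks (only with
      * CONFIG_PTP_1588_CLOCK and CONFIG_NETWORK_PHY_TIMESTAMPING enabled) and
      * MACsec offload (only with CONFIG_MACSEC enabled).
      */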
1626 static int nxp_c45_probe(struct phy_device *phydev)
1627 {
1628         struct nxp_c45_phy *priv;
1629         bool macsec_ability;
1630         int phy_abilities;
1631         bool ptp_ability;
1632         int ret = 0;
1633
1634         priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
1635         if (!priv)
1636                 return -ENOMEM;
1637
1638         skb_queue_head_init(&priv->tx_queue);
1639         skb_queue_head_init(&priv->rx_queue);
1640
1641         priv->phydev = phydev;
1642
1643         phydev->priv = priv;
1644
1645         mutex_init(&priv->ptp_lock);
1646
1647         phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1648                                      VEND1_PORT_ABILITIES);
1649         ptp_ability = !!(phy_abilities & PTP_ABILITY);
1650         if (!ptp_ability) {
1651                 phydev_dbg(phydev, "the phy does not support PTP\n");
1652                 goto no_ptp_support;
1653         }
1654
1655         if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
1656             IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
1657                 priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
1658                 priv->mii_ts.txtstamp = nxp_c45_txtstamp;
1659                 priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
1660                 priv->mii_ts.ts_info = nxp_c45_ts_info;
1661                 phydev->mii_ts = &priv->mii_ts;
1662                 ret = nxp_c45_init_ptp_clock(priv);
1663         } else {
1664                 phydev_dbg(phydev, "PTP support not enabled even though the phy supports it\n");
1665         }
1666
1667 no_ptp_support:
1668         macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
1669         if (!macsec_ability) {
1670                 phydev_info(phydev, "the phy does not support MACsec\n");
1671                 goto no_macsec_support;
1672         }
1673
1674         if (IS_ENABLED(CONFIG_MACSEC)) {
1675                 ret = nxp_c45_macsec_probe(phydev);
1676                 phydev_dbg(phydev, "MACsec support enabled\n");
1677         } else {
1678                 phydev_dbg(phydev, "MACsec support not enabled even though the phy supports it\n");
1679         }
1680
1681 no_macsec_support:
1682
1683         return ret;
1684 }
1685
1686 static void nxp_c45_remove(struct phy_device *phydev)
1687 {
1688         struct nxp_c45_phy *priv = phydev->priv;
1689
1690         if (priv->ptp_clock)
1691                 ptp_clock_unregister(priv->ptp_clock);
1692
1693         skb_queue_purge(&priv->tx_queue);
1694         skb_queue_purge(&priv->rx_queue);
1695         nxp_c45_macsec_remove(phydev);
1696 }
1697
1698 static void tja1103_counters_enable(struct phy_device *phydev)
1699 {
1700         phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
1701                          COUNTER_EN);
1702         phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
1703                          COUNTER_EN);
1704         phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
1705                          COUNTER_EN);
1706         phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
1707                          COUNTER_EN);
1708 }
1709
1710 static void tja1103_ptp_init(struct phy_device *phydev)
1711 {
1712         phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
1713                       TJA1103_RX_TS_INSRT_MODE2);
1714         phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
1715                          PTP_ENABLE);
1716 }
1717
1718 static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
1719 {
1720         if (enable)
1721                 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1722                                    VEND1_PORT_PTP_CONTROL,
1723                                    PORT_PTP_CONTROL_BYPASS);
1724         else
1725                 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1726                                  VEND1_PORT_PTP_CONTROL,
1727                                  PORT_PTP_CONTROL_BYPASS);
1728 }
1729
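     /* TJA1103 NMI handler: if the FUSA_PASS (functional safety) indication is
      * set, acknowledge it by writing the bit back and report the IRQ handled.
      */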
1730 static void tja1103_nmi_handler(struct phy_device *phydev,
1731                                 irqreturn_t *irq_status)
1732 {
1733         int ret;
1734
1735         ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1736                            VEND1_ALWAYS_ACCESSIBLE);
1737         if (ret & FUSA_PASS) {
1738                 phy_write_mmd(phydev, MDIO_MMD_VEND1,
1739                               VEND1_ALWAYS_ACCESSIBLE,
1740                               FUSA_PASS);
1741                 *irq_status = IRQ_HANDLED;
1742         }
1743 }
1744
1745 static const struct nxp_c45_regmap tja1103_regmap = {
1746         .vend1_ptp_clk_period   = 0x1104,
1747         .vend1_event_msg_filt   = 0x1148,
1748         .pps_enable             =
1749                 NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
1750         .pps_polarity           =
1751                 NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
1752         .ltc_lock_ctrl          =
1753                 NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
1754         .ltc_read               =
1755                 NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
1756         .ltc_write              =
1757                 NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
1758         .vend1_ltc_wr_nsec_0    = 0x1106,
1759         .vend1_ltc_wr_nsec_1    = 0x1107,
1760         .vend1_ltc_wr_sec_0     = 0x1108,
1761         .vend1_ltc_wr_sec_1     = 0x1109,
1762         .vend1_ltc_rd_nsec_0    = 0x110A,
1763         .vend1_ltc_rd_nsec_1    = 0x110B,
1764         .vend1_ltc_rd_sec_0     = 0x110C,
1765         .vend1_ltc_rd_sec_1     = 0x110D,
1766         .vend1_rate_adj_subns_0 = 0x110F,
1767         .vend1_rate_adj_subns_1 = 0x1110,
1768         .irq_egr_ts_en          =
1769                 NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
1770         .irq_egr_ts_status      =
1771                 NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
1772         .domain_number          =
1773                 NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
1774         .msg_type               =
1775                 NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
1776         .sequence_id            =
1777                 NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
1778         .sec_1_0                =
1779                 NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
1780         .sec_4_2                =
1781                 NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
1782         .nsec_15_0              =
1783                 NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
1784         .nsec_29_16             =
1785                 NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
1786         .vend1_ext_trg_data_0   = 0x1121,
1787         .vend1_ext_trg_data_1   = 0x1122,
1788         .vend1_ext_trg_data_2   = 0x1123,
1789         .vend1_ext_trg_data_3   = 0x1124,
1790         .vend1_ext_trg_ctrl     = 0x1126,
1791         .cable_test             = 0x8330,
1792         .cable_test_valid       =
1793                 NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
1794         .cable_test_result      =
1795                 NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
1796 };
1797
1798 static const struct nxp_c45_phy_data tja1103_phy_data = {
1799         .regmap = &tja1103_regmap,
1800         .stats = tja1103_hw_stats,
1801         .n_stats = ARRAY_SIZE(tja1103_hw_stats),
1802         .ptp_clk_period = PTP_CLK_PERIOD_100BT1,
1803         .ext_ts_both_edges = false,
1804         .ack_ptp_irq = false,
1805         .counters_enable = tja1103_counters_enable,
1806         .get_egressts = nxp_c45_get_hwtxts,
1807         .get_extts = nxp_c45_get_extts,
1808         .ptp_init = tja1103_ptp_init,
1809         .ptp_enable = tja1103_ptp_enable,
1810         .nmi_handler = tja1103_nmi_handler,
1811 };
1812
1813 static void tja1120_counters_enable(struct phy_device *phydev)
1814 {
1815         phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
1816                          EXTENDED_CNT_EN);
1817         phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
1818                          MONITOR_RESET);
1819         phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
1820                          ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
1821 }
1822
1823 static void tja1120_ptp_init(struct phy_device *phydev)
1824 {
1825         phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
1826                       TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
1827         phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
1828                       TJA1120_TS_INSRT_MODE);
1829         phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
1830                          PTP_ENABLE);
1831 }
1832
1833 static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
1834 {
1835         if (enable)
1836                 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1837                                  VEND1_PORT_FUNC_ENABLES,
1838                                  PTP_ENABLE);
1839         else
1840                 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1841                                    VEND1_PORT_FUNC_ENABLES,
1842                                    PTP_ENABLE);
1843 }
1844
1845 static void tja1120_nmi_handler(struct phy_device *phydev,
1846                                 irqreturn_t *irq_status)
1847 {
1848         int ret;
1849
1850         ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1851                            TJA1120_GLOBAL_INFRA_IRQ_STATUS);
1852         if (ret & TJA1120_DEV_BOOT_DONE) {
1853                 phy_write_mmd(phydev, MDIO_MMD_VEND1,
1854                               TJA1120_GLOBAL_INFRA_IRQ_ACK,
1855                               TJA1120_DEV_BOOT_DONE);
1856                 *irq_status = IRQ_HANDLED;
1857         }
1858 }
1859
1860 static const struct nxp_c45_regmap tja1120_regmap = {
1861         .vend1_ptp_clk_period   = 0x1020,
1862         .vend1_event_msg_filt   = 0x9010,
1863         .pps_enable             =
1864                 NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
1865         .pps_polarity           =
1866                 NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
1867         .ltc_lock_ctrl          =
1868                 NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
1869         .ltc_read               =
1870                 NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
1871         .ltc_write              =
1872                 NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
1873         .vend1_ltc_wr_nsec_0    = 0x1040,
1874         .vend1_ltc_wr_nsec_1    = 0x1041,
1875         .vend1_ltc_wr_sec_0     = 0x1042,
1876         .vend1_ltc_wr_sec_1     = 0x1043,
1877         .vend1_ltc_rd_nsec_0    = 0x1048,
1878         .vend1_ltc_rd_nsec_1    = 0x1049,
1879         .vend1_ltc_rd_sec_0     = 0x104A,
1880         .vend1_ltc_rd_sec_1     = 0x104B,
1881         .vend1_rate_adj_subns_0 = 0x1030,
1882         .vend1_rate_adj_subns_1 = 0x1031,
1883         .irq_egr_ts_en          =
1884                 NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
1885         .irq_egr_ts_status      =
1886                 NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
1887         .domain_number          =
1888                 NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
1889         .msg_type               =
1890                 NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
1891         .sequence_id            =
1892                 NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
1893         .sec_1_0                =
1894                 NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
1895         .sec_4_2                =
1896                 NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
1897         .nsec_15_0              =
1898                 NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
1899         .nsec_29_16             =
1900                 NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
1901         .vend1_ext_trg_data_0   = 0x1071,
1902         .vend1_ext_trg_data_1   = 0x1072,
1903         .vend1_ext_trg_data_2   = 0x1073,
1904         .vend1_ext_trg_data_3   = 0x1074,
1905         .vend1_ext_trg_ctrl     = 0x1075,
1906         .cable_test             = 0x8360,
1907         .cable_test_valid       =
1908                 NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
1909         .cable_test_result      =
1910                 NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
1911 };
1912
1913 static const struct nxp_c45_phy_data tja1120_phy_data = {
1914         .regmap = &tja1120_regmap,
1915         .stats = tja1120_hw_stats,
1916         .n_stats = ARRAY_SIZE(tja1120_hw_stats),
1917         .ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
1918         .ext_ts_both_edges = true,
1919         .ack_ptp_irq = true,
1920         .counters_enable = tja1120_counters_enable,
1921         .get_egressts = tja1120_get_hwtxts,
1922         .get_extts = tja1120_get_extts,
1923         .ptp_init = tja1120_ptp_init,
1924         .ptp_enable = tja1120_ptp_enable,
1925         .nmi_handler = tja1120_nmi_handler,
1926 };
1927
1928 static struct phy_driver nxp_c45_driver[] = {
1929         {
1930                 PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
1931                 .name                   = "NXP C45 TJA1103",
1932                 .get_features           = nxp_c45_get_features,
1933                 .driver_data            = &tja1103_phy_data,
1934                 .probe                  = nxp_c45_probe,
1935                 .soft_reset             = nxp_c45_soft_reset,
1936                 .config_aneg            = genphy_c45_config_aneg,
1937                 .config_init            = nxp_c45_config_init,
1938                 .config_intr            = tja1103_config_intr,
1939                 .handle_interrupt       = nxp_c45_handle_interrupt,
1940                 .read_status            = genphy_c45_read_status,
1941                 .suspend                = genphy_c45_pma_suspend,
1942                 .resume                 = genphy_c45_pma_resume,
1943                 .get_sset_count         = nxp_c45_get_sset_count,
1944                 .get_strings            = nxp_c45_get_strings,
1945                 .get_stats              = nxp_c45_get_stats,
1946                 .cable_test_start       = nxp_c45_cable_test_start,
1947                 .cable_test_get_status  = nxp_c45_cable_test_get_status,
1948                 .set_loopback           = genphy_c45_loopback,
1949                 .get_sqi                = nxp_c45_get_sqi,
1950                 .get_sqi_max            = nxp_c45_get_sqi_max,
1951                 .remove                 = nxp_c45_remove,
1952         },
1953         {
1954                 PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
1955                 .name                   = "NXP C45 TJA1120",
1956                 .get_features           = nxp_c45_get_features,
1957                 .driver_data            = &tja1120_phy_data,
1958                 .probe                  = nxp_c45_probe,
1959                 .soft_reset             = nxp_c45_soft_reset,
1960                 .config_aneg            = genphy_c45_config_aneg,
1961                 .config_init            = nxp_c45_config_init,
1962                 .config_intr            = tja1120_config_intr,
1963                 .handle_interrupt       = nxp_c45_handle_interrupt,
1964                 .read_status            = genphy_c45_read_status,
1965                 .link_change_notify     = tja1120_link_change_notify,
1966                 .suspend                = genphy_c45_pma_suspend,
1967                 .resume                 = genphy_c45_pma_resume,
1968                 .get_sset_count         = nxp_c45_get_sset_count,
1969                 .get_strings            = nxp_c45_get_strings,
1970                 .get_stats              = nxp_c45_get_stats,
1971                 .cable_test_start       = nxp_c45_cable_test_start,
1972                 .cable_test_get_status  = nxp_c45_cable_test_get_status,
1973                 .set_loopback           = genphy_c45_loopback,
1974                 .get_sqi                = nxp_c45_get_sqi,
1975                 .get_sqi_max            = nxp_c45_get_sqi_max,
1976                 .remove                 = nxp_c45_remove,
1977         },
1978 };
1979
1980 module_phy_driver(nxp_c45_driver);
1981
1982 static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
1983         { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
1984         { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
1985         { /*sentinel*/ },
1986 };
1987
1988 MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);
1989
1990 MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
1991 MODULE_DESCRIPTION("NXP C45 PHY driver");
1992 MODULE_LICENSE("GPL v2");