1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2021 NXP
4 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/phy.h>
14 #include <linux/processor.h>
15 #include <linux/property.h>
16 #include <linux/ptp_classify.h>
17 #include <linux/ptp_clock_kernel.h>
18 #include <linux/net_tstamp.h>
/* Vendor-specific (MMD VEND1) register addresses and bit definitions for the
 * NXP TJA1103/TJA1120 automotive Ethernet PHYs. Extraction note: original
 * file line numbers are embedded at the start of each line.
 */
20 #define PHY_ID_TJA_1103 0x001BB010
21 #define PHY_ID_TJA_1120 0x001BB031
23 #define VEND1_DEVICE_CONTROL 0x0040
24 #define DEVICE_CONTROL_RESET BIT(15)
25 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
26 #define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13)
28 #define VEND1_DEVICE_CONFIG 0x0048
30 #define TJA1120_VEND1_EXT_TS_MODE 0x1012
32 #define TJA1120_GLOBAL_INFRA_IRQ_ACK 0x2C08
33 #define TJA1120_GLOBAL_INFRA_IRQ_EN 0x2C0A
34 #define TJA1120_GLOBAL_INFRA_IRQ_STATUS 0x2C0C
35 #define TJA1120_DEV_BOOT_DONE BIT(1)
37 #define TJA1120_VEND1_PTP_TRIG_DATA_S 0x1070
39 #define TJA1120_EGRESS_TS_DATA_S 0x9060
40 #define TJA1120_EGRESS_TS_END 0x9067
41 #define TJA1120_TS_VALID BIT(0)
42 #define TJA1120_MORE_TS BIT(15)
44 #define VEND1_PHY_IRQ_ACK 0x80A0
45 #define VEND1_PHY_IRQ_EN 0x80A1
46 #define VEND1_PHY_IRQ_STATUS 0x80A2
47 #define PHY_IRQ_LINK_EVENT BIT(1)
49 #define VEND1_ALWAYS_ACCESSIBLE 0x801F
50 #define FUSA_PASS BIT(4)
52 #define VEND1_PHY_CONTROL 0x8100
53 #define PHY_CONFIG_EN BIT(14)
54 #define PHY_START_OP BIT(0)
56 #define VEND1_PHY_CONFIG 0x8108
57 #define PHY_CONFIG_AUTO BIT(0)
59 #define VEND1_SIGNAL_QUALITY 0x8320
60 #define SQI_VALID BIT(14)
61 #define SQI_MASK GENMASK(2, 0)
62 #define MAX_SQI SQI_MASK
64 #define CABLE_TEST_ENABLE BIT(15)
65 #define CABLE_TEST_START BIT(14)
66 #define CABLE_TEST_OK 0x00
67 #define CABLE_TEST_SHORTED 0x01
68 #define CABLE_TEST_OPEN 0x02
69 #define CABLE_TEST_UNKNOWN 0x07
71 #define VEND1_PORT_CONTROL 0x8040
72 #define PORT_CONTROL_EN BIT(14)
74 #define VEND1_PORT_ABILITIES 0x8046
75 #define PTP_ABILITY BIT(3)
77 #define VEND1_PORT_FUNC_IRQ_EN 0x807A
78 #define PTP_IRQS BIT(3)
80 #define VEND1_PTP_IRQ_ACK 0x9008
81 #define EGR_TS_IRQ BIT(1)
83 #define VEND1_PORT_INFRA_CONTROL 0xAC00
84 #define PORT_INFRA_CONTROL_EN BIT(14)
86 #define VEND1_RXID 0xAFCC
87 #define VEND1_TXID 0xAFCD
88 #define ID_ENABLE BIT(15)
90 #define VEND1_ABILITIES 0xAFC4
91 #define RGMII_ID_ABILITY BIT(15)
92 #define RGMII_ABILITY BIT(14)
93 #define RMII_ABILITY BIT(10)
94 #define REVMII_ABILITY BIT(9)
95 #define MII_ABILITY BIT(8)
96 #define SGMII_ABILITY BIT(0)
98 #define VEND1_MII_BASIC_CONFIG 0xAFC6
99 #define MII_BASIC_CONFIG_REV BIT(4)
100 #define MII_BASIC_CONFIG_SGMII 0x9
101 #define MII_BASIC_CONFIG_RGMII 0x7
102 #define MII_BASIC_CONFIG_RMII 0x5
103 #define MII_BASIC_CONFIG_MII 0x4
105 #define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351
106 #define EXTENDED_CNT_EN BIT(15)
107 #define VEND1_MONITOR_STATUS 0xAC80
108 #define MONITOR_RESET BIT(15)
109 #define VEND1_MONITOR_CONFIG 0xAC86
110 #define LOST_FRAMES_CNT_EN BIT(9)
111 #define ALL_FRAMES_CNT_EN BIT(8)
113 #define VEND1_SYMBOL_ERROR_COUNTER 0x8350
114 #define VEND1_LINK_DROP_COUNTER 0x8352
115 #define VEND1_LINK_LOSSES_AND_FAILURES 0x8353
116 #define VEND1_RX_PREAMBLE_COUNT 0xAFCE
117 #define VEND1_TX_PREAMBLE_COUNT 0xAFCF
118 #define VEND1_RX_IPG_LENGTH 0xAFD0
119 #define VEND1_TX_IPG_LENGTH 0xAFD1
120 #define COUNTER_EN BIT(15)
122 #define VEND1_PTP_CONFIG 0x1102
123 #define EXT_TRG_EDGE BIT(1)
125 #define TJA1120_SYNC_TRIG_FILTER 0x1010
126 #define PTP_TRIG_RISE_TS BIT(3)
127 #define PTP_TRIG_FALLING_TS BIT(2)
129 #define CLK_RATE_ADJ_LD BIT(15)
130 #define CLK_RATE_ADJ_DIR BIT(14)
132 #define VEND1_RX_TS_INSRT_CTRL 0x114D
133 #define TJA1103_RX_TS_INSRT_MODE2 0x02
135 #define TJA1120_RX_TS_INSRT_CTRL 0x9012
136 #define TJA1120_RX_TS_INSRT_EN BIT(15)
137 #define TJA1120_TS_INSRT_MODE BIT(4)
139 #define VEND1_EGR_RING_DATA_0 0x114E
140 #define VEND1_EGR_RING_CTRL 0x1154
142 #define RING_DATA_0_TS_VALID BIT(15)
144 #define RING_DONE BIT(0)
146 #define TS_SEC_MASK GENMASK(1, 0)
148 #define VEND1_PORT_FUNC_ENABLES 0x8048
149 #define PTP_ENABLE BIT(3)
150 #define PHY_TEST_ENABLE BIT(0)
152 #define VEND1_PORT_PTP_CONTROL 0x9000
153 #define PORT_PTP_CONTROL_BYPASS BIT(11)
155 #define PTP_CLK_PERIOD_100BT1 15ULL
156 #define PTP_CLK_PERIOD_1000BT1 8ULL
158 #define EVENT_MSG_FILT_ALL 0x0F
159 #define EVENT_MSG_FILT_NONE 0x00
161 #define VEND1_GPIO_FUNC_CONFIG_BASE 0x2C40
162 #define GPIO_FUNC_EN BIT(15)
163 #define GPIO_FUNC_PTP BIT(6)
164 #define GPIO_SIGNAL_PTP_TRIGGER 0x01
165 #define GPIO_SIGNAL_PPS_OUT 0x12
166 #define GPIO_DISABLE 0
/* NOTE(review): the continuation line of GPIO_PPS_OUT_CFG (presumably
 * GPIO_SIGNAL_PPS_OUT) is missing from this extraction — confirm against
 * the original source.
 */
167 #define GPIO_PPS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
169 #define GPIO_EXTTS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
170 GPIO_SIGNAL_PTP_TRIGGER)
172 #define RGMII_PERIOD_PS 8000U
173 #define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360)
174 #define MIN_ID_PS 1644U
175 #define MAX_ID_PS 2260U
176 #define DEFAULT_ID_PS 2000U
178 #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
179 (ppb) * (ptp_clk_period), NSEC_PER_SEC)
181 #define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
/* Driver-private types: skb control block, register-field descriptor,
 * hardware timestamp, per-chip register map, statistics descriptor and
 * per-chip data. Several struct bodies are truncated by the extraction.
 */
185 struct nxp_c45_skb_cb {
186 struct ptp_header *header;
190 #define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size) \
191 ((struct nxp_c45_reg_field) { \
198 struct nxp_c45_reg_field {
205 struct nxp_c45_hwts {
213 struct nxp_c45_regmap {
214 /* PTP config regs. */
215 u16 vend1_ptp_clk_period;
216 u16 vend1_event_msg_filt;
218 /* LTC bits and regs. */
219 struct nxp_c45_reg_field ltc_read;
220 struct nxp_c45_reg_field ltc_write;
221 struct nxp_c45_reg_field ltc_lock_ctrl;
222 u16 vend1_ltc_wr_nsec_0;
223 u16 vend1_ltc_wr_nsec_1;
224 u16 vend1_ltc_wr_sec_0;
225 u16 vend1_ltc_wr_sec_1;
226 u16 vend1_ltc_rd_nsec_0;
227 u16 vend1_ltc_rd_nsec_1;
228 u16 vend1_ltc_rd_sec_0;
229 u16 vend1_ltc_rd_sec_1;
230 u16 vend1_rate_adj_subns_0;
231 u16 vend1_rate_adj_subns_1;
233 /* External trigger reg fields. */
234 struct nxp_c45_reg_field irq_egr_ts_en;
235 struct nxp_c45_reg_field irq_egr_ts_status;
236 struct nxp_c45_reg_field domain_number;
237 struct nxp_c45_reg_field msg_type;
238 struct nxp_c45_reg_field sequence_id;
239 struct nxp_c45_reg_field sec_1_0;
240 struct nxp_c45_reg_field sec_4_2;
241 struct nxp_c45_reg_field nsec_15_0;
242 struct nxp_c45_reg_field nsec_29_16;
244 /* PPS and EXT Trigger bits and regs. */
245 struct nxp_c45_reg_field pps_enable;
246 struct nxp_c45_reg_field pps_polarity;
247 u16 vend1_ext_trg_data_0;
248 u16 vend1_ext_trg_data_1;
249 u16 vend1_ext_trg_data_2;
250 u16 vend1_ext_trg_data_3;
251 u16 vend1_ext_trg_ctrl;
253 /* Cable test reg fields. */
255 struct nxp_c45_reg_field cable_test_valid;
256 struct nxp_c45_reg_field cable_test_result;
259 struct nxp_c45_phy_stats {
261 const struct nxp_c45_reg_field counter;
264 struct nxp_c45_phy_data {
265 const struct nxp_c45_regmap *regmap;
266 const struct nxp_c45_phy_stats *stats;
269 bool ext_ts_both_edges;
271 void (*counters_enable)(struct phy_device *phydev);
272 bool (*get_egressts)(struct nxp_c45_phy *priv,
273 struct nxp_c45_hwts *hwts);
274 bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
275 void (*ptp_init)(struct phy_device *phydev);
276 void (*ptp_enable)(struct phy_device *phydev, bool enable);
277 void (*nmi_handler)(struct phy_device *phydev,
278 irqreturn_t *irq_status);
/* NOTE(review): the fields below belong to struct nxp_c45_phy; its opening
 * line is missing from this extraction.
 */
282 const struct nxp_c45_phy_data *phy_data;
283 struct phy_device *phydev;
284 struct mii_timestamper mii_ts;
285 struct ptp_clock *ptp_clock;
286 struct ptp_clock_info caps;
287 struct sk_buff_head tx_queue;
288 struct sk_buff_head rx_queue;
289 /* used to access the PTP registers atomic */
290 struct mutex ptp_lock;
295 struct timespec64 extts_ts;
/* Per-chip data attached to the PHY driver, stashed in drv->driver_data. */
301 struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
303 return phydev->drv->driver_data;
/* Convenience accessor for the chip's register map. */
307 struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
309 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
311 return phy_data->regmap;
/* Read the MMD register backing @reg_field and extract the field value.
 * Zero-sized fields are rejected; one-bit fields use BIT(), wider fields
 * GENMASK().
 */
314 static int nxp_c45_read_reg_field(struct phy_device *phydev,
315 const struct nxp_c45_reg_field *reg_field)
320 if (reg_field->size == 0) {
321 phydev_err(phydev, "Trying to read a reg field of size 0.\n")
325 ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
329 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
330 GENMASK(reg_field->offset + reg_field->size - 1,
/* Shift the masked value down so the caller sees the bare field. */
333 ret >>= reg_field->offset;
/* Read-modify-write @val into @reg_field. phy_modify_mmd_changed() skips
 * the bus write when the register content would not change.
 */
338 static int nxp_c45_write_reg_field(struct phy_device *phydev,
339 const struct nxp_c45_reg_field *reg_field,
345 if (reg_field->size == 0) {
346 phydev_err(phydev, "Trying to write a reg field of size 0.\n")
350 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
351 GENMASK(reg_field->offset + reg_field->size - 1,
353 set = val << reg_field->offset;
355 return phy_modify_mmd_changed(phydev, reg_field->devad,
356 reg_field->reg, mask, set);
/* Set (write 1 to) a register field that must be exactly one bit wide. */
359 static int nxp_c45_set_reg_field(struct phy_device *phydev,
360 const struct nxp_c45_reg_field *reg_field)
362 if (reg_field->size != 1) {
363 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
367 return nxp_c45_write_reg_field(phydev, reg_field, 1);
370 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
371 const struct nxp_c45_reg_field *reg_field)
373 if (reg_field->size != 1) {
374 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
378 return nxp_c45_write_reg_field(phydev, reg_field, 0);
/* True when no IRQ line is usable (irq <= 0) and egress timestamps must be
 * polled by the PTP aux worker instead of delivered via interrupt.
 */
381 static bool nxp_c45_poll_txts(struct phy_device *phydev)
383 return phydev->irq <= 0;
386 static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
387 struct timespec64 *ts,
388 struct ptp_system_timestamp *sts)
390 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
391 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
393 nxp_c45_set_reg_field(priv->phydev, ®map->ltc_read);
394 ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
395 regmap->vend1_ltc_rd_nsec_0);
396 ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
397 regmap->vend1_ltc_rd_nsec_1) << 16;
398 ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
399 regmap->vend1_ltc_rd_sec_0);
400 ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
401 regmap->vend1_ltc_rd_sec_1) << 16;
/* Locked wrapper around _nxp_c45_ptp_gettimex64(). */
406 static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
407 struct timespec64 *ts,
408 struct ptp_system_timestamp *sts)
410 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
412 mutex_lock(&priv->ptp_lock);
413 _nxp_c45_ptp_gettimex64(ptp, ts, sts);
414 mutex_unlock(&priv->ptp_lock);
419 static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
420 const struct timespec64 *ts)
422 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
423 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
425 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
427 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
429 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
431 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
433 nxp_c45_set_reg_field(priv->phydev, ®map->ltc_write);
/* Locked wrapper around _nxp_c45_ptp_settime64(). */
438 static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
439 const struct timespec64 *ts)
441 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
443 mutex_lock(&priv->ptp_lock);
444 _nxp_c45_ptp_settime64(ptp, ts);
445 mutex_unlock(&priv->ptp_lock);
/* Adjust LTC frequency: scaled_ppm is converted to ppb, then to a sub-ns
 * increment via PPM_TO_SUBNS_INC, written low half first; the load bit
 * (CLK_RATE_ADJ_LD) and direction bit (CLK_RATE_ADJ_DIR) ride in the
 * high-word write.
 */
450 static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
452 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
453 const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
454 const struct nxp_c45_regmap *regmap = data->regmap;
455 s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
459 mutex_lock(&priv->ptp_lock);
463 subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);
465 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
466 regmap->vend1_rate_adj_subns_0,
468 subns_inc_val >>= 16;
469 subns_inc_val |= CLK_RATE_ADJ_LD;
/* NOTE(review): presumably guarded by "if (ppb < 0)" — the condition line
 * was lost in extraction; confirm before relying on this.
 */
471 subns_inc_val |= CLK_RATE_ADJ_DIR;
473 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
474 regmap->vend1_rate_adj_subns_1,
476 mutex_unlock(&priv->ptp_lock);
/* Shift the LTC by @delta ns: read current time, add the delta, write it
 * back, all under ptp_lock.
 */
481 static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
483 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
484 struct timespec64 now, then;
486 mutex_lock(&priv->ptp_lock);
487 then = ns_to_timespec64(delta);
488 _nxp_c45_ptp_gettimex64(ptp, &now, NULL);
489 now = timespec64_add(now, then);
490 _nxp_c45_ptp_settime64(ptp, &now);
491 mutex_unlock(&priv->ptp_lock);
/* Merge a partial HW timestamp (seconds truncated to TS_SEC_MASK, i.e.
 * 2 bits, plus nanoseconds) into the full current time in @ts.
 */
496 static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
497 struct nxp_c45_hwts *hwts)
499 ts->tv_nsec = hwts->nsec;
/* HW seconds counter wrapped since @ts was sampled: step back one period. */
500 if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
501 ts->tv_sec -= TS_SEC_MASK + 1;
502 ts->tv_sec &= ~TS_SEC_MASK;
503 ts->tv_sec |= hwts->sec & TS_SEC_MASK;
/* Match a queued PTP header against a HW timestamp by sequence id,
 * message type and domain number.
 */
506 static bool nxp_c45_match_ts(struct ptp_header *header,
507 struct nxp_c45_hwts *hwts,
510 return ntohs(header->sequence_id) == hwts->sequence_id &&
511 ptp_get_msgtype(header, type) == hwts->msg_type &&
512 header->domain_number == hwts->domain_number;
/* Read the external-trigger timestamp (two 16-bit halves each for nsec and
 * sec), then write RING_DONE to advance the trigger ring.
 */
515 static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
516 struct timespec64 *extts)
518 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
520 extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
521 regmap->vend1_ext_trg_data_0);
522 extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
523 regmap->vend1_ext_trg_data_1) << 16;
524 extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
525 regmap->vend1_ext_trg_data_2);
526 extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
527 regmap->vend1_ext_trg_data_3) << 16;
528 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
529 regmap->vend1_ext_trg_ctrl, RING_DONE);
/* TJA1120: check the VALID bit in the PTP trigger data register. */
534 static bool tja1120_extts_is_valid(struct phy_device *phydev)
539 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
540 TJA1120_VEND1_PTP_TRIG_DATA_S);
541 valid = !!(reg & TJA1120_TS_VALID);
/* TJA1120 external timestamp read with an engineering-sample workaround:
 * if the buffer holds no valid timestamp but the FIFO signals MORE_TS,
 * pulse RING_DONE to move the next entry into the buffer, then re-check.
 */
546 static bool tja1120_get_extts(struct nxp_c45_phy *priv,
547 struct timespec64 *extts)
549 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
550 struct phy_device *phydev = priv->phydev;
555 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
556 regmap->vend1_ext_trg_ctrl);
557 more_ts = !!(reg & TJA1120_MORE_TS);
559 valid = tja1120_extts_is_valid(phydev);
562 goto tja1120_get_extts_out;
564 /* Bug workaround for TJA1120 engineering samples: move the new
565 * timestamp from the FIFO to the buffer.
567 phy_write_mmd(phydev, MDIO_MMD_VEND1,
568 regmap->vend1_ext_trg_ctrl, RING_DONE);
569 valid = tja1120_extts_is_valid(phydev);
571 goto tja1120_get_extts_out;
574 nxp_c45_get_extts(priv, extts);
575 tja1120_get_extts_out:
579 static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
580 struct nxp_c45_hwts *hwts)
582 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
583 struct phy_device *phydev = priv->phydev;
585 hwts->domain_number =
586 nxp_c45_read_reg_field(phydev, ®map->domain_number);
588 nxp_c45_read_reg_field(phydev, ®map->msg_type);
590 nxp_c45_read_reg_field(phydev, ®map->sequence_id);
592 nxp_c45_read_reg_field(phydev, ®map->nsec_15_0);
594 nxp_c45_read_reg_field(phydev, ®map->nsec_29_16) << 16;
595 hwts->sec = nxp_c45_read_reg_field(phydev, ®map->sec_1_0);
596 hwts->sec |= nxp_c45_read_reg_field(phydev, ®map->sec_4_2) << 2;
/* TJA1103: pop one egress timestamp under ptp_lock. RING_DONE advances the
 * ring; the VALID bit in EGR_RING_DATA_0 reports whether one was present.
 */
599 static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
600 struct nxp_c45_hwts *hwts)
605 mutex_lock(&priv->ptp_lock);
606 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
608 reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
609 valid = !!(reg & RING_DATA_0_TS_VALID);
611 goto nxp_c45_get_hwtxts_out;
613 nxp_c45_read_egress_ts(priv, hwts);
614 nxp_c45_get_hwtxts_out:
615 mutex_unlock(&priv->ptp_lock);
/* TJA1120: check the VALID bit in the egress timestamp data register. */
619 static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
624 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
625 valid = !!(reg & TJA1120_TS_VALID);
/* TJA1120 egress timestamp read, mirroring tja1120_get_extts(): when the
 * buffer is invalid but MORE_TS is set, writing TS_VALID to EGRESS_TS_END
 * moves the next FIFO entry into the buffer. Clears the VALID bit after a
 * successful read. Runs under ptp_lock.
 */
630 static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
631 struct nxp_c45_hwts *hwts)
633 struct phy_device *phydev = priv->phydev;
638 mutex_lock(&priv->ptp_lock);
639 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
640 more_ts = !!(reg & TJA1120_MORE_TS);
641 valid = tja1120_egress_ts_is_valid(phydev);
644 goto tja1120_get_hwtxts_out;
646 /* Bug workaround for TJA1120 engineering samples: move the
647 * new timestamp from the FIFO to the buffer.
649 phy_write_mmd(phydev, MDIO_MMD_VEND1,
650 TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
651 valid = tja1120_egress_ts_is_valid(phydev);
653 goto tja1120_get_hwtxts_out;
655 nxp_c45_read_egress_ts(priv, hwts);
656 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
658 tja1120_get_hwtxts_out:
659 mutex_unlock(&priv->ptp_lock);
/* Match a HW egress timestamp against queued TX skbs; on match, unlink the
 * skb (under the queue lock), reconstruct the full time and complete the
 * skb's TX timestamp. Warns when no skb matches.
 */
663 static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
664 struct nxp_c45_hwts *txts)
666 struct sk_buff *skb, *tmp, *skb_match = NULL;
667 struct skb_shared_hwtstamps shhwtstamps;
668 struct timespec64 ts;
673 spin_lock_irqsave(&priv->tx_queue.lock, flags);
674 skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
675 ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
676 NXP_C45_SKB_CB(skb)->type);
680 __skb_unlink(skb, &priv->tx_queue);
683 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
686 nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
687 nxp_c45_reconstruct_ts(&ts, txts);
688 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
689 ts_ns = timespec64_to_ns(&ts);
690 shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
691 skb_complete_tx_timestamp(skb_match, &shhwtstamps);
693 phydev_warn(priv->phydev,
694 "the tx timestamp doesn't match with any skb\n");
/* PTP aux worker: drain egress timestamps when polling (no IRQ), attach
 * reconstructed RX timestamps to queued skbs (raw value carried in the PTP
 * header's reserved2 field: top 2 bits seconds, low 30 bits nanoseconds),
 * and report new external-trigger events. Returns 1 to reschedule soon,
 * -1 otherwise.
 */
698 static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
700 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
701 const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
702 bool poll_txts = nxp_c45_poll_txts(priv->phydev);
703 struct skb_shared_hwtstamps *shhwtstamps_rx;
704 struct ptp_clock_event event;
705 struct nxp_c45_hwts hwts;
706 bool reschedule = false;
707 struct timespec64 ts;
712 while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
713 ts_valid = data->get_egressts(priv, &hwts);
714 if (unlikely(!ts_valid)) {
715 /* Still more skbs in the queue */
720 nxp_c45_process_txts(priv, &hwts);
723 while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
724 nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
725 ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
726 hwts.sec = ts_raw >> 30;
727 hwts.nsec = ts_raw & GENMASK(29, 0);
728 nxp_c45_reconstruct_ts(&ts, &hwts);
729 shhwtstamps_rx = skb_hwtstamps(skb);
730 shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
731 NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
/* Report an EXTTS event only when the timestamp changed since last poll. */
736 ts_valid = data->get_extts(priv, &ts);
737 if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
739 event.index = priv->extts_index;
740 event.type = PTP_CLOCK_EXTTS;
741 event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
742 ptp_clock_event(priv->ptp_clock, &event);
747 return reschedule ? 1 : -1;
/* Write @pin_cfg to the GPIO function-config register for @pin. */
750 static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
751 int pin, u16 pin_cfg)
753 struct phy_device *phydev = priv->phydev;
755 phy_write_mmd(phydev, MDIO_MMD_VEND1,
756 VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
/* Enable/disable the fixed 1 Hz PPS output on the pin mapped to
 * @perout->index. Only a 1 s period is accepted; PTP_PEROUT_PHASE may
 * select a 0 or 500000000 ns phase via the polarity bit.
 * NOTE(review): "®map->" below is mojibake for "&regmap->" — repair when
 * restoring this file.
 */
759 static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
760 struct ptp_perout_request *perout, int on)
762 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
763 struct phy_device *phydev = priv->phydev;
766 if (perout->flags & ~PTP_PEROUT_PHASE)
769 pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
/* Disable path: clear enable and polarity, then release the GPIO pin. */
774 nxp_c45_clear_reg_field(priv->phydev,
775 ®map->pps_enable);
776 nxp_c45_clear_reg_field(priv->phydev,
777 ®map->pps_polarity);
779 nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
784 /* The PPS signal is fixed to 1 second and is always generated when the
785 * seconds counter is incremented. The start time is not configurable.
786 * If the clock is adjusted, the PPS signal is automatically readjusted.
788 if (perout->period.sec != 1 || perout->period.nsec != 0) {
789 phydev_warn(phydev, "The period can be set only to 1 second.");
793 if (!(perout->flags & PTP_PEROUT_PHASE)) {
794 if (perout->start.sec != 0 || perout->start.nsec != 0) {
795 phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
799 if (perout->phase.nsec != 0 &&
800 perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
801 phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
805 if (perout->phase.nsec == 0)
806 nxp_c45_clear_reg_field(priv->phydev,
807 ®map->pps_polarity);
809 nxp_c45_set_reg_field(priv->phydev,
810 ®map->pps_polarity);
813 nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);
815 nxp_c45_set_reg_field(priv->phydev, ®map->pps_enable);
/* TJA1103: select which edge latches the external timestamp. Rising edge
 * clears EXT_TRG_EDGE, falling edge sets it.
 */
820 static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
821 struct ptp_extts_request *extts)
823 if (extts->flags & PTP_RISING_EDGE)
824 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
825 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
827 if (extts->flags & PTP_FALLING_EDGE)
828 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
829 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
/* TJA1120: independently enable rising and/or falling edge timestamping
 * via the sync trigger filter; a bare PTP_ENABLE_FEATURE request defaults
 * to rising edge.
 */
832 static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
833 struct ptp_extts_request *extts)
835 /* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
836 * this case external ts will be enabled on rising edge.
838 if (extts->flags & PTP_RISING_EDGE ||
839 extts->flags == PTP_ENABLE_FEATURE)
840 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
841 TJA1120_SYNC_TRIG_FILTER,
844 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
845 TJA1120_SYNC_TRIG_FILTER,
848 if (extts->flags & PTP_FALLING_EDGE)
849 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
850 TJA1120_SYNC_TRIG_FILTER,
851 PTP_TRIG_FALLING_TS);
853 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
854 TJA1120_SYNC_TRIG_FILTER,
855 PTP_TRIG_FALLING_TS);
/* Enable/disable external timestamping on the pin mapped to @extts->index.
 * Rejects unsupported flag combinations; sampling on both edges is only
 * allowed when the chip sets ext_ts_both_edges.
 */
858 static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
859 struct ptp_extts_request *extts, int on)
861 const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
864 if (extts->flags & ~(PTP_ENABLE_FEATURE |
870 /* Sampling on both edges is not supported */
871 if ((extts->flags & PTP_RISING_EDGE) &&
872 (extts->flags & PTP_FALLING_EDGE) &&
873 !data->ext_ts_both_edges)
876 pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
881 nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
887 if (data->ext_ts_both_edges)
888 nxp_c45_set_rising_and_falling(priv->phydev, extts);
890 nxp_c45_set_rising_or_falling(priv->phydev, extts);
892 nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
894 priv->extts_index = extts->index;
895 ptp_schedule_worker(priv->ptp_clock, 0);
/* ptp_clock_info.enable: dispatch EXTTS and PEROUT requests. */
900 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
901 struct ptp_clock_request *req, int on)
903 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
906 case PTP_CLK_REQ_EXTTS:
907 return nxp_c45_extts_enable(priv, &req->extts, on);
908 case PTP_CLK_REQ_PEROUT:
909 return nxp_c45_perout_enable(priv, &req->perout, on);
/* The 12 GPIO pins exposed to the PTP core; functions assigned at runtime. */
915 static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
916 { "nxp_c45_gpio0", 0, PTP_PF_NONE},
917 { "nxp_c45_gpio1", 1, PTP_PF_NONE},
918 { "nxp_c45_gpio2", 2, PTP_PF_NONE},
919 { "nxp_c45_gpio3", 3, PTP_PF_NONE},
920 { "nxp_c45_gpio4", 4, PTP_PF_NONE},
921 { "nxp_c45_gpio5", 5, PTP_PF_NONE},
922 { "nxp_c45_gpio6", 6, PTP_PF_NONE},
923 { "nxp_c45_gpio7", 7, PTP_PF_NONE},
924 { "nxp_c45_gpio8", 8, PTP_PF_NONE},
925 { "nxp_c45_gpio9", 9, PTP_PF_NONE},
926 { "nxp_c45_gpio10", 10, PTP_PF_NONE},
927 { "nxp_c45_gpio11", 11, PTP_PF_NONE},
/* ptp_clock_info.verify: reject pins outside the descriptor table. */
930 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
931 enum ptp_pin_function func, unsigned int chan)
933 if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
/* Fill ptp_clock_info and register the PTP clock with the core. Returns a
 * negative errno from ptp_clock_register() on failure.
 */
948 static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
950 priv->caps = (struct ptp_clock_info) {
951 .owner = THIS_MODULE,
952 .name = "NXP C45 PHC",
954 .adjfine = nxp_c45_ptp_adjfine,
955 .adjtime = nxp_c45_ptp_adjtime,
956 .gettimex64 = nxp_c45_ptp_gettimex64,
957 .settime64 = nxp_c45_ptp_settime64,
958 .enable = nxp_c45_ptp_enable,
959 .verify = nxp_c45_ptp_verify_pin,
960 .do_aux_work = nxp_c45_do_aux_work,
961 .pin_config = nxp_c45_ptp_pins,
962 .n_pins = ARRAY_SIZE(nxp_c45_ptp_pins),
967 priv->ptp_clock = ptp_clock_register(&priv->caps,
968 &priv->phydev->mdio.dev);
970 if (IS_ERR(priv->ptp_clock))
971 return PTR_ERR(priv->ptp_clock);
/* ptp_clock_register() returns NULL when PTP support is compiled out. */
973 if (!priv->ptp_clock)
/* mii_timestamper.txtstamp: when TX timestamping is enabled, parse the PTP
 * header, queue the skb for matching, and in polling mode kick the aux
 * worker immediately.
 */
979 static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
980 struct sk_buff *skb, int type)
982 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
985 switch (priv->hwts_tx) {
987 NXP_C45_SKB_CB(skb)->type = type;
988 NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
989 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
990 skb_queue_tail(&priv->tx_queue, skb);
991 if (nxp_c45_poll_txts(priv->phydev))
992 ptp_schedule_worker(priv->ptp_clock, 0);
994 case HWTSTAMP_TX_OFF:
/* mii_timestamper.rxtstamp: queue PTP frames for the aux worker, which
 * attaches the reconstructed RX timestamp.
 */
1001 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
1002 struct sk_buff *skb, int type)
1004 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1006 struct ptp_header *header = ptp_parse_header(skb, type);
1014 NXP_C45_SKB_CB(skb)->header = header;
1015 skb_queue_tail(&priv->rx_queue, skb);
1016 ptp_schedule_worker(priv->ptp_clock, 0);
/* mii_timestamper.hwtstamp: apply a userspace SIOCSHWTSTAMP config — tx
 * type, rx filter, the chip's event-message filter and PTP block enable,
 * plus the egress-timestamp IRQ when not in polling mode. The (possibly
 * adjusted) config is copied back to userspace.
 */
1021 static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
1022 struct ifreq *ifreq)
1024 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1026 struct phy_device *phydev = priv->phydev;
1027 const struct nxp_c45_phy_data *data;
1028 struct hwtstamp_config cfg;
1030 if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
1033 if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
1036 data = nxp_c45_get_data(phydev);
1037 priv->hwts_tx = cfg.tx_type;
1039 switch (cfg.rx_filter) {
1040 case HWTSTAMP_FILTER_NONE:
1043 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1044 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1045 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1047 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1053 if (priv->hwts_rx || priv->hwts_tx) {
1054 phy_write_mmd(phydev, MDIO_MMD_VEND1,
1055 data->regmap->vend1_event_msg_filt,
1056 EVENT_MSG_FILT_ALL);
1057 data->ptp_enable(phydev, true);
1059 phy_write_mmd(phydev, MDIO_MMD_VEND1,
1060 data->regmap->vend1_event_msg_filt,
1061 EVENT_MSG_FILT_NONE);
1062 data->ptp_enable(phydev, false);
/* In polling mode there is no PTP IRQ to (un)mask. */
1065 if (nxp_c45_poll_txts(priv->phydev))
1066 goto nxp_c45_no_ptp_irq;
1069 nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
1071 nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);
1074 return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
/* ethtool get_ts_info: advertise HW timestamping capabilities and the PHC
 * index of the registered PTP clock.
 */
1077 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
1078 struct ethtool_ts_info *ts_info)
1080 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1083 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1084 SOF_TIMESTAMPING_RX_HARDWARE |
1085 SOF_TIMESTAMPING_RAW_HARDWARE;
1086 ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
1087 ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1088 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1089 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1090 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1091 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
/* Counters present on both TJA1103 and TJA1120 (reg, devad, offset, width). */
1096 static const struct nxp_c45_phy_stats common_hw_stats[] = {
1097 { "phy_link_status_drop_cnt",
1098 NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
1099 { "phy_link_availability_drop_cnt",
1100 NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
1101 { "phy_link_loss_cnt",
1102 NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
1103 { "phy_link_failure_cnt",
1104 NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
1105 { "phy_symbol_error_cnt",
1106 NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
/* TJA1103-only counters. NOTE(review): the IPG-length entry names are
 * missing from this extraction.
 */
1109 static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
1110 { "rx_preamble_count",
1111 NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
1112 { "tx_preamble_count",
1113 NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
1115 NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
1117 NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
/* TJA1120-only counters. NOTE(review): several entry-name lines are
 * missing from this extraction.
 */
1120 static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
1121 { "phy_symbol_error_cnt_ext",
1122 NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
1124 NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
1126 NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
1128 NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
1130 NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
1131 { "tx_lost_frames_xtd",
1132 NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
1134 NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
1135 { "rx_lost_frames_xtd",
1136 NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
1138 NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
/* ethtool stat count: common counters plus any chip-specific ones. */
1141 static int nxp_c45_get_sset_count(struct phy_device *phydev)
1143 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1145 return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
/* ethtool stat names: common strings first, then chip-specific names. */
1148 static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
1150 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1151 size_t count = nxp_c45_get_sset_count(phydev);
1155 for (i = 0; i < count; i++) {
1156 if (i < ARRAY_SIZE(common_hw_stats)) {
1157 strscpy(data + i * ETH_GSTRING_LEN,
1158 common_hw_stats[i].name, ETH_GSTRING_LEN);
1161 idx = i - ARRAY_SIZE(common_hw_stats);
1162 strscpy(data + i * ETH_GSTRING_LEN,
1163 phy_data->stats[idx].name, ETH_GSTRING_LEN);
/* ethtool stat values: read each counter's register field, common counters
 * first, then chip-specific ones.
 */
1167 static void nxp_c45_get_stats(struct phy_device *phydev,
1168 struct ethtool_stats *stats, u64 *data)
1170 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1171 size_t count = nxp_c45_get_sset_count(phydev);
1172 const struct nxp_c45_reg_field *reg_field;
1177 for (i = 0; i < count; i++) {
1178 if (i < ARRAY_SIZE(common_hw_stats)) {
1179 reg_field = &common_hw_stats[i].counter;
1181 idx = i - ARRAY_SIZE(common_hw_stats);
1182 reg_field = &phy_data->stats[idx].counter;
1185 ret = nxp_c45_read_reg_field(phydev, reg_field);
/* Enter config-enabled state: set global/all config enable, wait for it to
 * settle, then enable the port, PHY and port-infra blocks.
 */
1193 static int nxp_c45_config_enable(struct phy_device *phydev)
1195 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
1196 DEVICE_CONTROL_CONFIG_GLOBAL_EN |
1197 DEVICE_CONTROL_CONFIG_ALL_EN);
1198 usleep_range(400, 450);
1200 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
1202 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
1204 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
1205 PORT_INFRA_CONTROL_EN);
/* Start PHY operation via PHY_CONTROL. NOTE(review): the bit-mask argument
 * line is missing from this extraction — presumably PHY_START_OP.
 */
1210 static int nxp_c45_start_op(struct phy_device *phydev)
1212 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
/* Mask/unmask the link-event IRQ according to phydev->interrupts. */
1216 static int nxp_c45_config_intr(struct phy_device *phydev)
1218 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1219 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1220 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1222 return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1223 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
/* TJA1103 IRQ config: acknowledge the non-maskable FUSA IRQ first, then do
 * the common link-event IRQ config.
 */
1226 static int tja1103_config_intr(struct phy_device *phydev)
1230 /* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
1231 ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
1236 return nxp_c45_config_intr(phydev);
/* TJA1120 IRQ config: (un)mask the boot-done infra IRQ, then the common
 * link-event IRQ config.
 */
1239 static int tja1120_config_intr(struct phy_device *phydev)
1243 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1244 ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1245 TJA1120_GLOBAL_INFRA_IRQ_EN,
1246 TJA1120_DEV_BOOT_DONE);
1248 ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1249 TJA1120_GLOBAL_INFRA_IRQ_EN,
1250 TJA1120_DEV_BOOT_DONE);
1254 return nxp_c45_config_intr(phydev);
/* Threaded IRQ handler: ack link events and kick the PHY state machine,
 * drain egress timestamps when the PTP IRQ fired, and defer anything else
 * to the chip-specific nmi_handler.
 */
1257 static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
1259 const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
1260 struct nxp_c45_phy *priv = phydev->priv;
1261 irqreturn_t ret = IRQ_NONE;
1262 struct nxp_c45_hwts hwts;
1265 irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
1266 if (irq & PHY_IRQ_LINK_EVENT) {
1267 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
1268 PHY_IRQ_LINK_EVENT);
1269 phy_trigger_machine(phydev);
1273 irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
1275 /* If ack_ptp_irq is false, the IRQ bit is self-clear and will
1276 * be cleared when the EGR TS FIFO is empty. Otherwise, the
1277 * IRQ bit should be cleared before reading the timestamp,
1279 if (data->ack_ptp_irq)
1280 phy_write_mmd(phydev, MDIO_MMD_VEND1,
1281 VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
1282 while (data->get_egressts(priv, &hwts))
1283 nxp_c45_process_txts(priv, &hwts);
1288 data->nmi_handler(phydev, &ret);
1293 static int nxp_c45_soft_reset(struct phy_device *phydev)
1297 ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
1298 DEVICE_CONTROL_RESET);
1302 return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
1303 VEND1_DEVICE_CONTROL, ret,
1304 !(ret & DEVICE_CONTROL_RESET), 20000,
1308 static int nxp_c45_cable_test_start(struct phy_device *phydev)
1310 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1312 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1313 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1314 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1315 CABLE_TEST_ENABLE | CABLE_TEST_START);
1318 static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
1321 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1323 u8 cable_test_result;
1325 ret = nxp_c45_read_reg_field(phydev, ®map->cable_test_valid);
1332 cable_test_result = nxp_c45_read_reg_field(phydev,
1333 ®map->cable_test_result);
1335 switch (cable_test_result) {
1337 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1338 ETHTOOL_A_CABLE_RESULT_CODE_OK);
1340 case CABLE_TEST_SHORTED:
1341 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1342 ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
1344 case CABLE_TEST_OPEN:
1345 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1346 ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
1349 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1350 ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
1353 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1355 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1356 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1358 return nxp_c45_start_op(phydev);
1361 static int nxp_c45_get_sqi(struct phy_device *phydev)
1365 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1366 if (!(reg & SQI_VALID))
1374 static int nxp_c45_get_sqi_max(struct phy_device *phydev)
1379 static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
1381 if (delay < MIN_ID_PS) {
1382 phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
1386 if (delay > MAX_ID_PS) {
1387 phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1394 static void nxp_c45_counters_enable(struct phy_device *phydev)
1396 const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
1398 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
1401 data->counters_enable(phydev);
1404 static void nxp_c45_ptp_init(struct phy_device *phydev)
1406 const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
1408 phy_write_mmd(phydev, MDIO_MMD_VEND1,
1409 data->regmap->vend1_ptp_clk_period,
1410 data->ptp_clk_period);
1411 nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);
1413 data->ptp_init(phydev);
1416 static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
1418 /* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
1419 * To avoid floating point operations we'll multiply by 10
1420 * and get 1 decimal point precision.
1422 phase_offset_raw *= 10;
1423 phase_offset_raw -= 738;
1424 return div_u64(phase_offset_raw, 9);
1427 static void nxp_c45_disable_delays(struct phy_device *phydev)
1429 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
1430 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
1433 static void nxp_c45_set_delays(struct phy_device *phydev)
1435 struct nxp_c45_phy *priv = phydev->priv;
1436 u64 tx_delay = priv->tx_delay;
1437 u64 rx_delay = priv->rx_delay;
1440 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1441 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1442 degree = div_u64(tx_delay, PS_PER_DEGREE);
1443 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1444 ID_ENABLE | nxp_c45_get_phase_shift(degree));
1446 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1450 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1451 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1452 degree = div_u64(rx_delay, PS_PER_DEGREE);
1453 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1454 ID_ENABLE | nxp_c45_get_phase_shift(degree));
1456 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1461 static int nxp_c45_get_delays(struct phy_device *phydev)
1463 struct nxp_c45_phy *priv = phydev->priv;
1466 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1467 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1468 ret = device_property_read_u32(&phydev->mdio.dev,
1469 "tx-internal-delay-ps",
1472 priv->tx_delay = DEFAULT_ID_PS;
1474 ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1477 "tx-internal-delay-ps invalid value\n");
1482 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1483 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1484 ret = device_property_read_u32(&phydev->mdio.dev,
1485 "rx-internal-delay-ps",
1488 priv->rx_delay = DEFAULT_ID_PS;
1490 ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1493 "rx-internal-delay-ps invalid value\n");
1501 static int nxp_c45_set_phy_mode(struct phy_device *phydev)
1505 ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
1506 phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);
1508 switch (phydev->interface) {
1509 case PHY_INTERFACE_MODE_RGMII:
1510 if (!(ret & RGMII_ABILITY)) {
1511 phydev_err(phydev, "rgmii mode not supported\n");
1514 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1515 MII_BASIC_CONFIG_RGMII);
1516 nxp_c45_disable_delays(phydev);
1518 case PHY_INTERFACE_MODE_RGMII_ID:
1519 case PHY_INTERFACE_MODE_RGMII_TXID:
1520 case PHY_INTERFACE_MODE_RGMII_RXID:
1521 if (!(ret & RGMII_ID_ABILITY)) {
1522 phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
1525 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1526 MII_BASIC_CONFIG_RGMII);
1527 ret = nxp_c45_get_delays(phydev);
1531 nxp_c45_set_delays(phydev);
1533 case PHY_INTERFACE_MODE_MII:
1534 if (!(ret & MII_ABILITY)) {
1535 phydev_err(phydev, "mii mode not supported\n");
1538 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1539 MII_BASIC_CONFIG_MII);
1541 case PHY_INTERFACE_MODE_REVMII:
1542 if (!(ret & REVMII_ABILITY)) {
1543 phydev_err(phydev, "rev-mii mode not supported\n");
1546 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1547 MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
1549 case PHY_INTERFACE_MODE_RMII:
1550 if (!(ret & RMII_ABILITY)) {
1551 phydev_err(phydev, "rmii mode not supported\n");
1554 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1555 MII_BASIC_CONFIG_RMII);
1557 case PHY_INTERFACE_MODE_SGMII:
1558 if (!(ret & SGMII_ABILITY)) {
1559 phydev_err(phydev, "sgmii mode not supported\n");
1562 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1563 MII_BASIC_CONFIG_SGMII);
1565 case PHY_INTERFACE_MODE_INTERNAL:
1574 static int nxp_c45_config_init(struct phy_device *phydev)
1578 ret = nxp_c45_config_enable(phydev);
1580 phydev_err(phydev, "Failed to enable config\n");
1584 /* Bug workaround for SJA1110 rev B: enable write access
1585 * to MDIO_MMD_PMAPMD
1587 phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
1588 phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
1590 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
1593 ret = nxp_c45_set_phy_mode(phydev);
1597 phydev->autoneg = AUTONEG_DISABLE;
1599 nxp_c45_counters_enable(phydev);
1600 nxp_c45_ptp_init(phydev);
1602 return nxp_c45_start_op(phydev);
1605 static int nxp_c45_get_features(struct phy_device *phydev)
1607 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
1608 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);
1610 return genphy_c45_pma_read_abilities(phydev);
1613 static int nxp_c45_probe(struct phy_device *phydev)
1615 struct nxp_c45_phy *priv;
1619 priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
1623 skb_queue_head_init(&priv->tx_queue);
1624 skb_queue_head_init(&priv->rx_queue);
1626 priv->phydev = phydev;
1628 phydev->priv = priv;
1630 mutex_init(&priv->ptp_lock);
1632 ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1633 VEND1_PORT_ABILITIES);
1634 ptp_ability = !!(ptp_ability & PTP_ABILITY);
1636 phydev_dbg(phydev, "the phy does not support PTP");
1637 goto no_ptp_support;
1640 if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
1641 IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
1642 priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
1643 priv->mii_ts.txtstamp = nxp_c45_txtstamp;
1644 priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
1645 priv->mii_ts.ts_info = nxp_c45_ts_info;
1646 phydev->mii_ts = &priv->mii_ts;
1647 ret = nxp_c45_init_ptp_clock(priv);
1649 phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
1657 static void nxp_c45_remove(struct phy_device *phydev)
1659 struct nxp_c45_phy *priv = phydev->priv;
1661 if (priv->ptp_clock)
1662 ptp_clock_unregister(priv->ptp_clock);
1664 skb_queue_purge(&priv->tx_queue);
1665 skb_queue_purge(&priv->rx_queue);
1668 static void tja1103_counters_enable(struct phy_device *phydev)
1670 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
1672 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
1674 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
1676 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
1680 static void tja1103_ptp_init(struct phy_device *phydev)
1682 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
1683 TJA1103_RX_TS_INSRT_MODE2);
1684 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
1688 static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
1691 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1692 VEND1_PORT_PTP_CONTROL,
1693 PORT_PTP_CONTROL_BYPASS);
1695 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1696 VEND1_PORT_PTP_CONTROL,
1697 PORT_PTP_CONTROL_BYPASS);
1700 static void tja1103_nmi_handler(struct phy_device *phydev,
1701 irqreturn_t *irq_status)
1705 ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1706 VEND1_ALWAYS_ACCESSIBLE);
1707 if (ret & FUSA_PASS) {
1708 phy_write_mmd(phydev, MDIO_MMD_VEND1,
1709 VEND1_ALWAYS_ACCESSIBLE,
1711 *irq_status = IRQ_HANDLED;
1715 static const struct nxp_c45_regmap tja1103_regmap = {
1716 .vend1_ptp_clk_period = 0x1104,
1717 .vend1_event_msg_filt = 0x1148,
1719 NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
1721 NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
1723 NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
1725 NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
1727 NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
1728 .vend1_ltc_wr_nsec_0 = 0x1106,
1729 .vend1_ltc_wr_nsec_1 = 0x1107,
1730 .vend1_ltc_wr_sec_0 = 0x1108,
1731 .vend1_ltc_wr_sec_1 = 0x1109,
1732 .vend1_ltc_rd_nsec_0 = 0x110A,
1733 .vend1_ltc_rd_nsec_1 = 0x110B,
1734 .vend1_ltc_rd_sec_0 = 0x110C,
1735 .vend1_ltc_rd_sec_1 = 0x110D,
1736 .vend1_rate_adj_subns_0 = 0x110F,
1737 .vend1_rate_adj_subns_1 = 0x1110,
1739 NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
1740 .irq_egr_ts_status =
1741 NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
1743 NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
1745 NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
1747 NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
1749 NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
1751 NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
1753 NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
1755 NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
1756 .vend1_ext_trg_data_0 = 0x1121,
1757 .vend1_ext_trg_data_1 = 0x1122,
1758 .vend1_ext_trg_data_2 = 0x1123,
1759 .vend1_ext_trg_data_3 = 0x1124,
1760 .vend1_ext_trg_ctrl = 0x1126,
1761 .cable_test = 0x8330,
1763 NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
1764 .cable_test_result =
1765 NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
1768 static const struct nxp_c45_phy_data tja1103_phy_data = {
1769 .regmap = &tja1103_regmap,
1770 .stats = tja1103_hw_stats,
1771 .n_stats = ARRAY_SIZE(tja1103_hw_stats),
1772 .ptp_clk_period = PTP_CLK_PERIOD_100BT1,
1773 .ext_ts_both_edges = false,
1774 .ack_ptp_irq = false,
1775 .counters_enable = tja1103_counters_enable,
1776 .get_egressts = nxp_c45_get_hwtxts,
1777 .get_extts = nxp_c45_get_extts,
1778 .ptp_init = tja1103_ptp_init,
1779 .ptp_enable = tja1103_ptp_enable,
1780 .nmi_handler = tja1103_nmi_handler,
1783 static void tja1120_counters_enable(struct phy_device *phydev)
1785 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
1787 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
1789 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
1790 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
1793 static void tja1120_ptp_init(struct phy_device *phydev)
1795 phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
1796 TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
1797 phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
1798 TJA1120_TS_INSRT_MODE);
1799 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
1803 static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
1806 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1807 VEND1_PORT_FUNC_ENABLES,
1810 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1811 VEND1_PORT_FUNC_ENABLES,
1815 static void tja1120_nmi_handler(struct phy_device *phydev,
1816 irqreturn_t *irq_status)
1820 ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1821 TJA1120_GLOBAL_INFRA_IRQ_STATUS);
1822 if (ret & TJA1120_DEV_BOOT_DONE) {
1823 phy_write_mmd(phydev, MDIO_MMD_VEND1,
1824 TJA1120_GLOBAL_INFRA_IRQ_ACK,
1825 TJA1120_DEV_BOOT_DONE);
1826 *irq_status = IRQ_HANDLED;
1830 static const struct nxp_c45_regmap tja1120_regmap = {
1831 .vend1_ptp_clk_period = 0x1020,
1832 .vend1_event_msg_filt = 0x9010,
1834 NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
1836 NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
1838 NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
1840 NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
1842 NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
1843 .vend1_ltc_wr_nsec_0 = 0x1040,
1844 .vend1_ltc_wr_nsec_1 = 0x1041,
1845 .vend1_ltc_wr_sec_0 = 0x1042,
1846 .vend1_ltc_wr_sec_1 = 0x1043,
1847 .vend1_ltc_rd_nsec_0 = 0x1048,
1848 .vend1_ltc_rd_nsec_1 = 0x1049,
1849 .vend1_ltc_rd_sec_0 = 0x104A,
1850 .vend1_ltc_rd_sec_1 = 0x104B,
1851 .vend1_rate_adj_subns_0 = 0x1030,
1852 .vend1_rate_adj_subns_1 = 0x1031,
1854 NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
1855 .irq_egr_ts_status =
1856 NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
1858 NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
1860 NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
1862 NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
1864 NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
1866 NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
1868 NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
1870 NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
1871 .vend1_ext_trg_data_0 = 0x1071,
1872 .vend1_ext_trg_data_1 = 0x1072,
1873 .vend1_ext_trg_data_2 = 0x1073,
1874 .vend1_ext_trg_data_3 = 0x1074,
1875 .vend1_ext_trg_ctrl = 0x1075,
1876 .cable_test = 0x8360,
1878 NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
1879 .cable_test_result =
1880 NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
1883 static const struct nxp_c45_phy_data tja1120_phy_data = {
1884 .regmap = &tja1120_regmap,
1885 .stats = tja1120_hw_stats,
1886 .n_stats = ARRAY_SIZE(tja1120_hw_stats),
1887 .ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
1888 .ext_ts_both_edges = true,
1889 .ack_ptp_irq = true,
1890 .counters_enable = tja1120_counters_enable,
1891 .get_egressts = tja1120_get_hwtxts,
1892 .get_extts = tja1120_get_extts,
1893 .ptp_init = tja1120_ptp_init,
1894 .ptp_enable = tja1120_ptp_enable,
1895 .nmi_handler = tja1120_nmi_handler,
1898 static struct phy_driver nxp_c45_driver[] = {
1900 PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
1901 .name = "NXP C45 TJA1103",
1902 .get_features = nxp_c45_get_features,
1903 .driver_data = &tja1103_phy_data,
1904 .probe = nxp_c45_probe,
1905 .soft_reset = nxp_c45_soft_reset,
1906 .config_aneg = genphy_c45_config_aneg,
1907 .config_init = nxp_c45_config_init,
1908 .config_intr = tja1103_config_intr,
1909 .handle_interrupt = nxp_c45_handle_interrupt,
1910 .read_status = genphy_c45_read_status,
1911 .suspend = genphy_c45_pma_suspend,
1912 .resume = genphy_c45_pma_resume,
1913 .get_sset_count = nxp_c45_get_sset_count,
1914 .get_strings = nxp_c45_get_strings,
1915 .get_stats = nxp_c45_get_stats,
1916 .cable_test_start = nxp_c45_cable_test_start,
1917 .cable_test_get_status = nxp_c45_cable_test_get_status,
1918 .set_loopback = genphy_c45_loopback,
1919 .get_sqi = nxp_c45_get_sqi,
1920 .get_sqi_max = nxp_c45_get_sqi_max,
1921 .remove = nxp_c45_remove,
1924 PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
1925 .name = "NXP C45 TJA1120",
1926 .get_features = nxp_c45_get_features,
1927 .driver_data = &tja1120_phy_data,
1928 .probe = nxp_c45_probe,
1929 .soft_reset = nxp_c45_soft_reset,
1930 .config_aneg = genphy_c45_config_aneg,
1931 .config_init = nxp_c45_config_init,
1932 .config_intr = tja1120_config_intr,
1933 .handle_interrupt = nxp_c45_handle_interrupt,
1934 .read_status = genphy_c45_read_status,
1935 .suspend = genphy_c45_pma_suspend,
1936 .resume = genphy_c45_pma_resume,
1937 .get_sset_count = nxp_c45_get_sset_count,
1938 .get_strings = nxp_c45_get_strings,
1939 .get_stats = nxp_c45_get_stats,
1940 .cable_test_start = nxp_c45_cable_test_start,
1941 .cable_test_get_status = nxp_c45_cable_test_get_status,
1942 .set_loopback = genphy_c45_loopback,
1943 .get_sqi = nxp_c45_get_sqi,
1944 .get_sqi_max = nxp_c45_get_sqi_max,
1945 .remove = nxp_c45_remove,
1949 module_phy_driver(nxp_c45_driver);
1951 static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
1952 { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
1953 { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
1957 MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);
1959 MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
1960 MODULE_DESCRIPTION("NXP C45 PHY driver");
1961 MODULE_LICENSE("GPL v2");