1 // SPDX-License-Identifier: GPL-2.0
/* NXP C45 PHY driver
 * Copyright (C) 2021 NXP
 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
 */
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/phy.h>
14 #include <linux/processor.h>
15 #include <linux/property.h>
16 #include <linux/ptp_classify.h>
17 #include <linux/ptp_clock_kernel.h>
18 #include <linux/net_tstamp.h>
20 #define PHY_ID_TJA_1103 0x001BB010
22 #define PMAPMD_B100T1_PMAPMD_CTL 0x0834
23 #define B100T1_PMAPMD_CONFIG_EN BIT(15)
24 #define B100T1_PMAPMD_MASTER BIT(14)
/* Forced master enables both the config-enable and the master bit;
 * slave keeps only config-enable (continuation line restored).
 */
#define MASTER_MODE			(B100T1_PMAPMD_CONFIG_EN | \
					 B100T1_PMAPMD_MASTER)
#define SLAVE_MODE			(B100T1_PMAPMD_CONFIG_EN)
29 #define VEND1_DEVICE_CONTROL 0x0040
30 #define DEVICE_CONTROL_RESET BIT(15)
31 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
32 #define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13)
34 #define VEND1_PHY_IRQ_ACK 0x80A0
35 #define VEND1_PHY_IRQ_EN 0x80A1
36 #define VEND1_PHY_IRQ_STATUS 0x80A2
37 #define PHY_IRQ_LINK_EVENT BIT(1)
39 #define VEND1_PHY_CONTROL 0x8100
40 #define PHY_CONFIG_EN BIT(14)
41 #define PHY_START_OP BIT(0)
43 #define VEND1_PHY_CONFIG 0x8108
44 #define PHY_CONFIG_AUTO BIT(0)
46 #define VEND1_SIGNAL_QUALITY 0x8320
47 #define SQI_VALID BIT(14)
48 #define SQI_MASK GENMASK(2, 0)
49 #define MAX_SQI SQI_MASK
51 #define VEND1_CABLE_TEST 0x8330
52 #define CABLE_TEST_ENABLE BIT(15)
53 #define CABLE_TEST_START BIT(14)
54 #define CABLE_TEST_VALID BIT(13)
55 #define CABLE_TEST_OK 0x00
56 #define CABLE_TEST_SHORTED 0x01
57 #define CABLE_TEST_OPEN 0x02
58 #define CABLE_TEST_UNKNOWN 0x07
60 #define VEND1_PORT_CONTROL 0x8040
61 #define PORT_CONTROL_EN BIT(14)
63 #define VEND1_PORT_ABILITIES 0x8046
64 #define PTP_ABILITY BIT(3)
66 #define VEND1_PORT_INFRA_CONTROL 0xAC00
67 #define PORT_INFRA_CONTROL_EN BIT(14)
69 #define VEND1_RXID 0xAFCC
70 #define VEND1_TXID 0xAFCD
71 #define ID_ENABLE BIT(15)
73 #define VEND1_ABILITIES 0xAFC4
74 #define RGMII_ID_ABILITY BIT(15)
75 #define RGMII_ABILITY BIT(14)
76 #define RMII_ABILITY BIT(10)
77 #define REVMII_ABILITY BIT(9)
78 #define MII_ABILITY BIT(8)
79 #define SGMII_ABILITY BIT(0)
81 #define VEND1_MII_BASIC_CONFIG 0xAFC6
82 #define MII_BASIC_CONFIG_REV BIT(8)
83 #define MII_BASIC_CONFIG_SGMII 0x9
84 #define MII_BASIC_CONFIG_RGMII 0x7
85 #define MII_BASIC_CONFIG_RMII 0x5
86 #define MII_BASIC_CONFIG_MII 0x4
88 #define VEND1_SYMBOL_ERROR_COUNTER 0x8350
89 #define VEND1_LINK_DROP_COUNTER 0x8352
90 #define VEND1_LINK_LOSSES_AND_FAILURES 0x8353
91 #define VEND1_R_GOOD_FRAME_CNT 0xA950
92 #define VEND1_R_BAD_FRAME_CNT 0xA952
93 #define VEND1_R_RXER_FRAME_CNT 0xA954
94 #define VEND1_RX_PREAMBLE_COUNT 0xAFCE
95 #define VEND1_TX_PREAMBLE_COUNT 0xAFCF
96 #define VEND1_RX_IPG_LENGTH 0xAFD0
97 #define VEND1_TX_IPG_LENGTH 0xAFD1
98 #define COUNTER_EN BIT(15)
100 #define VEND1_LTC_LOAD_CTRL 0x1105
101 #define READ_LTC BIT(2)
102 #define LOAD_LTC BIT(0)
104 #define VEND1_LTC_WR_NSEC_0 0x1106
105 #define VEND1_LTC_WR_NSEC_1 0x1107
106 #define VEND1_LTC_WR_SEC_0 0x1108
107 #define VEND1_LTC_WR_SEC_1 0x1109
109 #define VEND1_LTC_RD_NSEC_0 0x110A
110 #define VEND1_LTC_RD_NSEC_1 0x110B
111 #define VEND1_LTC_RD_SEC_0 0x110C
112 #define VEND1_LTC_RD_SEC_1 0x110D
114 #define VEND1_RATE_ADJ_SUBNS_0 0x110F
115 #define VEND1_RATE_ADJ_SUBNS_1 0x1110
116 #define CLK_RATE_ADJ_LD BIT(15)
117 #define CLK_RATE_ADJ_DIR BIT(14)
119 #define VEND1_HW_LTC_LOCK_CTRL 0x1115
120 #define HW_LTC_LOCK_EN BIT(0)
122 #define VEND1_PTP_IRQ_EN 0x1131
123 #define VEND1_PTP_IRQ_STATUS 0x1132
124 #define PTP_IRQ_EGR_TS BIT(0)
126 #define VEND1_RX_TS_INSRT_CTRL 0x114D
127 #define RX_TS_INSRT_MODE2 0x02
129 #define VEND1_EGR_RING_DATA_0 0x114E
130 #define VEND1_EGR_RING_DATA_1_SEQ_ID 0x114F
131 #define VEND1_EGR_RING_DATA_2_NSEC_15_0 0x1150
132 #define VEND1_EGR_RING_DATA_3 0x1151
133 #define VEND1_EGR_RING_CTRL 0x1154
/* Layout of the egress timestamp FIFO words. Seconds bits 4:2 live in
 * DATA_0 bits 14:12 (consumers shift the masked value right by 10), so
 * the mask must be GENMASK(14, 12) — GENMASK(14, 2) would leak the
 * domain-number and message-type fields into the seconds value.
 */
#define RING_DATA_0_DOMAIN_NUMBER	GENMASK(7, 0)
#define RING_DATA_0_MSG_TYPE		GENMASK(11, 8)
#define RING_DATA_0_SEC_4_2		GENMASK(14, 12)
#define RING_DATA_0_TS_VALID		BIT(15)

#define RING_DATA_3_NSEC_29_16		GENMASK(13, 0)
#define RING_DATA_3_SEC_1_0		GENMASK(15, 14)
#define RING_DATA_5_SEC_16_5		GENMASK(15, 4)
#define RING_DONE			BIT(0)

/* Only the two low seconds bits are carried in the RX timestamp. */
#define TS_SEC_MASK			GENMASK(1, 0)
147 #define VEND1_PORT_FUNC_ENABLES 0x8048
148 #define PTP_ENABLE BIT(3)
150 #define VEND1_PORT_PTP_CONTROL 0x9000
151 #define PORT_PTP_CONTROL_BYPASS BIT(11)
153 #define VEND1_PTP_CLK_PERIOD 0x1104
154 #define PTP_CLK_PERIOD_100BT1 15ULL
156 #define VEND1_EVENT_MSG_FILT 0x1148
157 #define EVENT_MSG_FILT_ALL 0x0F
158 #define EVENT_MSG_FILT_NONE 0x00
160 #define VEND1_TX_PIPE_DLY_NS 0x1149
161 #define VEND1_TX_PIPEDLY_SUBNS 0x114A
162 #define VEND1_RX_PIPE_DLY_NS 0x114B
163 #define VEND1_RX_PIPEDLY_SUBNS 0x114C
165 #define RGMII_PERIOD_PS 8000U
166 #define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360)
167 #define MIN_ID_PS 1644U
168 #define MAX_ID_PS 2260U
169 #define DEFAULT_ID_PS 2000U
171 #define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK(31, 0) * (ppb) * \
172 PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
174 #define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
176 struct nxp_c45_skb_cb {
177 struct ptp_header *header;
181 struct nxp_c45_hwts {
190 struct phy_device *phydev;
191 struct mii_timestamper mii_ts;
192 struct ptp_clock *ptp_clock;
193 struct ptp_clock_info caps;
194 struct sk_buff_head tx_queue;
195 struct sk_buff_head rx_queue;
196 /* used to access the PTP registers atomic */
197 struct mutex ptp_lock;
204 struct nxp_c45_phy_stats {
212 static bool nxp_c45_poll_txts(struct phy_device *phydev)
214 return phydev->irq <= 0;
217 static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
218 struct timespec64 *ts,
219 struct ptp_system_timestamp *sts)
221 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
223 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
225 ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
226 VEND1_LTC_RD_NSEC_0);
227 ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
228 VEND1_LTC_RD_NSEC_1) << 16;
229 ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
231 ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
232 VEND1_LTC_RD_SEC_1) << 16;
237 static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
238 struct timespec64 *ts,
239 struct ptp_system_timestamp *sts)
241 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
243 mutex_lock(&priv->ptp_lock);
244 _nxp_c45_ptp_gettimex64(ptp, ts, sts);
245 mutex_unlock(&priv->ptp_lock);
250 static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
251 const struct timespec64 *ts)
253 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
255 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_0,
257 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_1,
259 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_0,
261 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_1,
263 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
269 static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
270 const struct timespec64 *ts)
272 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
274 mutex_lock(&priv->ptp_lock);
275 _nxp_c45_ptp_settime64(ptp, ts);
276 mutex_unlock(&priv->ptp_lock);
281 static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
283 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
284 s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
288 mutex_lock(&priv->ptp_lock);
292 subns_inc_val = PPM_TO_SUBNS_INC(ppb);
294 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_0,
296 subns_inc_val >>= 16;
297 subns_inc_val |= CLK_RATE_ADJ_LD;
299 subns_inc_val |= CLK_RATE_ADJ_DIR;
301 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_1,
303 mutex_unlock(&priv->ptp_lock);
308 static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
310 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
311 struct timespec64 now, then;
313 mutex_lock(&priv->ptp_lock);
314 then = ns_to_timespec64(delta);
315 _nxp_c45_ptp_gettimex64(ptp, &now, NULL);
316 now = timespec64_add(now, then);
317 _nxp_c45_ptp_settime64(ptp, &now);
318 mutex_unlock(&priv->ptp_lock);
323 static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
324 struct nxp_c45_hwts *hwts)
326 ts->tv_nsec = hwts->nsec;
327 if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
328 ts->tv_sec -= TS_SEC_MASK + 1;
329 ts->tv_sec &= ~TS_SEC_MASK;
330 ts->tv_sec |= hwts->sec & TS_SEC_MASK;
333 static bool nxp_c45_match_ts(struct ptp_header *header,
334 struct nxp_c45_hwts *hwts,
337 return ntohs(header->sequence_id) == hwts->sequence_id &&
338 ptp_get_msgtype(header, type) == hwts->msg_type &&
339 header->domain_number == hwts->domain_number;
342 static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
343 struct nxp_c45_hwts *hwts)
348 mutex_lock(&priv->ptp_lock);
349 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
351 reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
352 valid = !!(reg & RING_DATA_0_TS_VALID);
354 goto nxp_c45_get_hwtxts_out;
356 hwts->domain_number = reg;
357 hwts->msg_type = (reg & RING_DATA_0_MSG_TYPE) >> 8;
358 hwts->sec = (reg & RING_DATA_0_SEC_4_2) >> 10;
359 hwts->sequence_id = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
360 VEND1_EGR_RING_DATA_1_SEQ_ID);
361 hwts->nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
362 VEND1_EGR_RING_DATA_2_NSEC_15_0);
363 reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_3);
364 hwts->nsec |= (reg & RING_DATA_3_NSEC_29_16) << 16;
365 hwts->sec |= (reg & RING_DATA_3_SEC_1_0) >> 14;
367 nxp_c45_get_hwtxts_out:
368 mutex_unlock(&priv->ptp_lock);
372 static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
373 struct nxp_c45_hwts *txts)
375 struct sk_buff *skb, *tmp, *skb_match = NULL;
376 struct skb_shared_hwtstamps shhwtstamps;
377 struct timespec64 ts;
382 spin_lock_irqsave(&priv->tx_queue.lock, flags);
383 skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
384 ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
385 NXP_C45_SKB_CB(skb)->type);
389 __skb_unlink(skb, &priv->tx_queue);
392 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
395 nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
396 nxp_c45_reconstruct_ts(&ts, txts);
397 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
398 ts_ns = timespec64_to_ns(&ts);
399 shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
400 skb_complete_tx_timestamp(skb_match, &shhwtstamps);
402 phydev_warn(priv->phydev,
403 "the tx timestamp doesn't match with any skb\n");
407 static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
409 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
410 bool poll_txts = nxp_c45_poll_txts(priv->phydev);
411 struct skb_shared_hwtstamps *shhwtstamps_rx;
412 struct nxp_c45_hwts hwts;
413 bool reschedule = false;
414 struct timespec64 ts;
419 while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
420 txts_valid = nxp_c45_get_hwtxts(priv, &hwts);
421 if (unlikely(!txts_valid)) {
422 /* Still more skbs in the queue */
427 nxp_c45_process_txts(priv, &hwts);
430 while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
431 nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
432 ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
433 hwts.sec = ts_raw >> 30;
434 hwts.nsec = ts_raw & GENMASK(29, 0);
435 nxp_c45_reconstruct_ts(&ts, &hwts);
436 shhwtstamps_rx = skb_hwtstamps(skb);
437 shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
438 NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
442 return reschedule ? 1 : -1;
445 static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
447 priv->caps = (struct ptp_clock_info) {
448 .owner = THIS_MODULE,
449 .name = "NXP C45 PHC",
451 .adjfine = nxp_c45_ptp_adjfine,
452 .adjtime = nxp_c45_ptp_adjtime,
453 .gettimex64 = nxp_c45_ptp_gettimex64,
454 .settime64 = nxp_c45_ptp_settime64,
455 .do_aux_work = nxp_c45_do_aux_work,
458 priv->ptp_clock = ptp_clock_register(&priv->caps,
459 &priv->phydev->mdio.dev);
461 if (IS_ERR(priv->ptp_clock))
462 return PTR_ERR(priv->ptp_clock);
464 if (!priv->ptp_clock)
470 static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
471 struct sk_buff *skb, int type)
473 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
476 switch (priv->hwts_tx) {
478 NXP_C45_SKB_CB(skb)->type = type;
479 NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
480 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
481 skb_queue_tail(&priv->tx_queue, skb);
482 if (nxp_c45_poll_txts(priv->phydev))
483 ptp_schedule_worker(priv->ptp_clock, 0);
485 case HWTSTAMP_TX_OFF:
492 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
493 struct sk_buff *skb, int type)
495 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
497 struct ptp_header *header = ptp_parse_header(skb, type);
505 NXP_C45_SKB_CB(skb)->header = header;
506 skb_queue_tail(&priv->rx_queue, skb);
507 ptp_schedule_worker(priv->ptp_clock, 0);
512 static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
515 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
517 struct phy_device *phydev = priv->phydev;
518 struct hwtstamp_config cfg;
520 if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
523 if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
526 priv->hwts_tx = cfg.tx_type;
528 switch (cfg.rx_filter) {
529 case HWTSTAMP_FILTER_NONE:
532 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
533 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
534 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
536 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
542 if (priv->hwts_rx || priv->hwts_tx) {
543 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
545 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
546 VEND1_PORT_PTP_CONTROL,
547 PORT_PTP_CONTROL_BYPASS);
549 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
550 EVENT_MSG_FILT_NONE);
551 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_PTP_CONTROL,
552 PORT_PTP_CONTROL_BYPASS);
555 if (nxp_c45_poll_txts(priv->phydev))
556 goto nxp_c45_no_ptp_irq;
559 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
560 VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);
562 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
563 VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);
566 return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
569 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
570 struct ethtool_ts_info *ts_info)
572 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
575 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
576 SOF_TIMESTAMPING_RX_HARDWARE |
577 SOF_TIMESTAMPING_RAW_HARDWARE;
578 ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
579 ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
580 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
581 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
582 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
583 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
588 static const struct nxp_c45_phy_stats nxp_c45_hw_stats[] = {
589 { "phy_symbol_error_cnt", MDIO_MMD_VEND1,
590 VEND1_SYMBOL_ERROR_COUNTER, 0, GENMASK(15, 0) },
591 { "phy_link_status_drop_cnt", MDIO_MMD_VEND1,
592 VEND1_LINK_DROP_COUNTER, 8, GENMASK(13, 8) },
593 { "phy_link_availability_drop_cnt", MDIO_MMD_VEND1,
594 VEND1_LINK_DROP_COUNTER, 0, GENMASK(5, 0) },
595 { "phy_link_loss_cnt", MDIO_MMD_VEND1,
596 VEND1_LINK_LOSSES_AND_FAILURES, 10, GENMASK(15, 10) },
597 { "phy_link_failure_cnt", MDIO_MMD_VEND1,
598 VEND1_LINK_LOSSES_AND_FAILURES, 0, GENMASK(9, 0) },
599 { "r_good_frame_cnt", MDIO_MMD_VEND1,
600 VEND1_R_GOOD_FRAME_CNT, 0, GENMASK(15, 0) },
601 { "r_bad_frame_cnt", MDIO_MMD_VEND1,
602 VEND1_R_BAD_FRAME_CNT, 0, GENMASK(15, 0) },
603 { "r_rxer_frame_cnt", MDIO_MMD_VEND1,
604 VEND1_R_RXER_FRAME_CNT, 0, GENMASK(15, 0) },
605 { "rx_preamble_count", MDIO_MMD_VEND1,
606 VEND1_RX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
607 { "tx_preamble_count", MDIO_MMD_VEND1,
608 VEND1_TX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
609 { "rx_ipg_length", MDIO_MMD_VEND1,
610 VEND1_RX_IPG_LENGTH, 0, GENMASK(8, 0) },
611 { "tx_ipg_length", MDIO_MMD_VEND1,
612 VEND1_TX_IPG_LENGTH, 0, GENMASK(8, 0) },
615 static int nxp_c45_get_sset_count(struct phy_device *phydev)
617 return ARRAY_SIZE(nxp_c45_hw_stats);
620 static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
624 for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
625 strncpy(data + i * ETH_GSTRING_LEN,
626 nxp_c45_hw_stats[i].name, ETH_GSTRING_LEN);
630 static void nxp_c45_get_stats(struct phy_device *phydev,
631 struct ethtool_stats *stats, u64 *data)
636 for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
637 ret = phy_read_mmd(phydev, nxp_c45_hw_stats[i].mmd,
638 nxp_c45_hw_stats[i].reg);
642 data[i] = ret & nxp_c45_hw_stats[i].mask;
643 data[i] >>= nxp_c45_hw_stats[i].off;
648 static int nxp_c45_config_enable(struct phy_device *phydev)
650 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
651 DEVICE_CONTROL_CONFIG_GLOBAL_EN |
652 DEVICE_CONTROL_CONFIG_ALL_EN);
653 usleep_range(400, 450);
655 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
657 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
659 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
660 PORT_INFRA_CONTROL_EN);
665 static int nxp_c45_start_op(struct phy_device *phydev)
667 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
671 static int nxp_c45_config_intr(struct phy_device *phydev)
673 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
674 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
675 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
677 return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
678 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
681 static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
683 struct nxp_c45_phy *priv = phydev->priv;
684 irqreturn_t ret = IRQ_NONE;
685 struct nxp_c45_hwts hwts;
688 irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
689 if (irq & PHY_IRQ_LINK_EVENT) {
690 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
692 phy_trigger_machine(phydev);
696 /* There is no need for ACK.
697 * The irq signal will be asserted until the EGR TS FIFO will be
700 irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_IRQ_STATUS);
701 if (irq & PTP_IRQ_EGR_TS) {
702 while (nxp_c45_get_hwtxts(priv, &hwts))
703 nxp_c45_process_txts(priv, &hwts);
711 static int nxp_c45_soft_reset(struct phy_device *phydev)
715 ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
716 DEVICE_CONTROL_RESET);
720 return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
721 VEND1_DEVICE_CONTROL, ret,
722 !(ret & DEVICE_CONTROL_RESET), 20000,
726 static int nxp_c45_cable_test_start(struct phy_device *phydev)
728 return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
729 CABLE_TEST_ENABLE | CABLE_TEST_START);
732 static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
736 u8 cable_test_result;
738 ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST);
739 if (!(ret & CABLE_TEST_VALID)) {
745 cable_test_result = ret & GENMASK(2, 0);
747 switch (cable_test_result) {
749 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
750 ETHTOOL_A_CABLE_RESULT_CODE_OK);
752 case CABLE_TEST_SHORTED:
753 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
754 ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
756 case CABLE_TEST_OPEN:
757 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
758 ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
761 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
762 ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
765 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
768 return nxp_c45_start_op(phydev);
771 static int nxp_c45_setup_master_slave(struct phy_device *phydev)
773 switch (phydev->master_slave_set) {
774 case MASTER_SLAVE_CFG_MASTER_FORCE:
775 case MASTER_SLAVE_CFG_MASTER_PREFERRED:
776 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
779 case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
780 case MASTER_SLAVE_CFG_SLAVE_FORCE:
781 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
784 case MASTER_SLAVE_CFG_UNKNOWN:
785 case MASTER_SLAVE_CFG_UNSUPPORTED:
788 phydev_warn(phydev, "Unsupported Master/Slave mode\n");
795 static int nxp_c45_read_master_slave(struct phy_device *phydev)
799 phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
800 phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
802 reg = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL);
806 if (reg & B100T1_PMAPMD_MASTER) {
807 phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
808 phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
810 phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
811 phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
/* 100BASE-T1 has no autonegotiation; only the role is configurable. */
static int nxp_c45_config_aneg(struct phy_device *phydev)
{
	return nxp_c45_setup_master_slave(phydev);
}
/* Standard C45 status read plus the vendor master/slave role. */
static int nxp_c45_read_status(struct phy_device *phydev)
{
	int ret;

	ret = genphy_c45_read_status(phydev);
	if (ret)
		return ret;

	ret = nxp_c45_read_master_slave(phydev);
	if (ret)
		return ret;

	return 0;
}
837 static int nxp_c45_get_sqi(struct phy_device *phydev)
841 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
842 if (!(reg & SQI_VALID))
850 static int nxp_c45_get_sqi_max(struct phy_device *phydev)
855 static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
857 if (delay < MIN_ID_PS) {
858 phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
862 if (delay > MAX_ID_PS) {
863 phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
870 static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
872 /* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
873 * To avoid floating point operations we'll multiply by 10
874 * and get 1 decimal point precision.
876 phase_offset_raw *= 10;
877 phase_offset_raw -= 738;
878 return div_u64(phase_offset_raw, 9);
881 static void nxp_c45_disable_delays(struct phy_device *phydev)
883 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
884 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
887 static void nxp_c45_set_delays(struct phy_device *phydev)
889 struct nxp_c45_phy *priv = phydev->priv;
890 u64 tx_delay = priv->tx_delay;
891 u64 rx_delay = priv->rx_delay;
894 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
895 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
896 degree = div_u64(tx_delay, PS_PER_DEGREE);
897 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
898 ID_ENABLE | nxp_c45_get_phase_shift(degree));
900 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
904 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
905 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
906 degree = div_u64(rx_delay, PS_PER_DEGREE);
907 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
908 ID_ENABLE | nxp_c45_get_phase_shift(degree));
910 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
915 static int nxp_c45_get_delays(struct phy_device *phydev)
917 struct nxp_c45_phy *priv = phydev->priv;
920 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
921 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
922 ret = device_property_read_u32(&phydev->mdio.dev,
923 "tx-internal-delay-ps",
926 priv->tx_delay = DEFAULT_ID_PS;
928 ret = nxp_c45_check_delay(phydev, priv->tx_delay);
931 "tx-internal-delay-ps invalid value\n");
936 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
937 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
938 ret = device_property_read_u32(&phydev->mdio.dev,
939 "rx-internal-delay-ps",
942 priv->rx_delay = DEFAULT_ID_PS;
944 ret = nxp_c45_check_delay(phydev, priv->rx_delay);
947 "rx-internal-delay-ps invalid value\n");
955 static int nxp_c45_set_phy_mode(struct phy_device *phydev)
959 ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
960 phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);
962 switch (phydev->interface) {
963 case PHY_INTERFACE_MODE_RGMII:
964 if (!(ret & RGMII_ABILITY)) {
965 phydev_err(phydev, "rgmii mode not supported\n");
968 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
969 MII_BASIC_CONFIG_RGMII);
970 nxp_c45_disable_delays(phydev);
972 case PHY_INTERFACE_MODE_RGMII_ID:
973 case PHY_INTERFACE_MODE_RGMII_TXID:
974 case PHY_INTERFACE_MODE_RGMII_RXID:
975 if (!(ret & RGMII_ID_ABILITY)) {
976 phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
979 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
980 MII_BASIC_CONFIG_RGMII);
981 ret = nxp_c45_get_delays(phydev);
985 nxp_c45_set_delays(phydev);
987 case PHY_INTERFACE_MODE_MII:
988 if (!(ret & MII_ABILITY)) {
989 phydev_err(phydev, "mii mode not supported\n");
992 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
993 MII_BASIC_CONFIG_MII);
995 case PHY_INTERFACE_MODE_REVMII:
996 if (!(ret & REVMII_ABILITY)) {
997 phydev_err(phydev, "rev-mii mode not supported\n");
1000 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1001 MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
1003 case PHY_INTERFACE_MODE_RMII:
1004 if (!(ret & RMII_ABILITY)) {
1005 phydev_err(phydev, "rmii mode not supported\n");
1008 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1009 MII_BASIC_CONFIG_RMII);
1011 case PHY_INTERFACE_MODE_SGMII:
1012 if (!(ret & SGMII_ABILITY)) {
1013 phydev_err(phydev, "sgmii mode not supported\n");
1016 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1017 MII_BASIC_CONFIG_SGMII);
1019 case PHY_INTERFACE_MODE_INTERNAL:
1028 static int nxp_c45_config_init(struct phy_device *phydev)
1032 ret = nxp_c45_config_enable(phydev);
1034 phydev_err(phydev, "Failed to enable config\n");
1038 /* Bug workaround for SJA1110 rev B: enable write access
1039 * to MDIO_MMD_PMAPMD
1041 phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
1042 phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
1044 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
1047 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
1049 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
1051 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
1053 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
1055 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
1058 ret = nxp_c45_set_phy_mode(phydev);
1062 phydev->autoneg = AUTONEG_DISABLE;
1064 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK_PERIOD,
1065 PTP_CLK_PERIOD_100BT1);
1066 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_HW_LTC_LOCK_CTRL,
1068 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
1070 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
1073 return nxp_c45_start_op(phydev);
1076 static int nxp_c45_probe(struct phy_device *phydev)
1078 struct nxp_c45_phy *priv;
1082 priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
1086 skb_queue_head_init(&priv->tx_queue);
1087 skb_queue_head_init(&priv->rx_queue);
1089 priv->phydev = phydev;
1091 phydev->priv = priv;
1093 mutex_init(&priv->ptp_lock);
1095 ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1096 VEND1_PORT_ABILITIES);
1097 ptp_ability = !!(ptp_ability & PTP_ABILITY);
1099 phydev_dbg(phydev, "the phy does not support PTP");
1100 goto no_ptp_support;
1103 if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
1104 IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
1105 priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
1106 priv->mii_ts.txtstamp = nxp_c45_txtstamp;
1107 priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
1108 priv->mii_ts.ts_info = nxp_c45_ts_info;
1109 phydev->mii_ts = &priv->mii_ts;
1110 ret = nxp_c45_init_ptp_clock(priv);
1112 phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
1120 static struct phy_driver nxp_c45_driver[] = {
1122 PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
1123 .name = "NXP C45 TJA1103",
1124 .features = PHY_BASIC_T1_FEATURES,
1125 .probe = nxp_c45_probe,
1126 .soft_reset = nxp_c45_soft_reset,
1127 .config_aneg = nxp_c45_config_aneg,
1128 .config_init = nxp_c45_config_init,
1129 .config_intr = nxp_c45_config_intr,
1130 .handle_interrupt = nxp_c45_handle_interrupt,
1131 .read_status = nxp_c45_read_status,
1132 .suspend = genphy_c45_pma_suspend,
1133 .resume = genphy_c45_pma_resume,
1134 .get_sset_count = nxp_c45_get_sset_count,
1135 .get_strings = nxp_c45_get_strings,
1136 .get_stats = nxp_c45_get_stats,
1137 .cable_test_start = nxp_c45_cable_test_start,
1138 .cable_test_get_status = nxp_c45_cable_test_get_status,
1139 .set_loopback = genphy_c45_loopback,
1140 .get_sqi = nxp_c45_get_sqi,
1141 .get_sqi_max = nxp_c45_get_sqi_max,
1145 module_phy_driver(nxp_c45_driver);
1147 static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
1148 { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
1152 MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);
1154 MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
1155 MODULE_DESCRIPTION("NXP C45 PHY driver");
1156 MODULE_LICENSE("GPL v2");