// SPDX-License-Identifier: GPL-2.0+
/*
 * TI Common Platform Time Sync
 *
 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
 */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include "cpts.h"
#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
#define CPTS_SKB_RX_TX_TMO 100 /* ms */
#define CPTS_EVENT_RX_TX_TIMEOUT (100) /* ms */
struct cpts_skb_cb_data {
        u32 skb_mtype_seqid;
        unsigned long tmo;
};

#define cpts_read32(c, r)       readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r)   writel_relaxed(v, &c->reg->r)
static int event_expired(struct cpts_event *event)
{
        return time_after(jiffies, event->tmo);
}

static int event_type(struct cpts_event *event)
{
        return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
}

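/* Pop one event from the hardware FIFO. If the raw interrupt status
 * shows a pending timestamp, latch the event_high/event_low words and
 * acknowledge the entry by writing EVENT_POP. Returns 0 on success,
 * -1 if the FIFO is empty.
 */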
static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
{
        u32 r = cpts_read32(cpts, intstat_raw);

        if (r & TS_PEND_RAW) {
                *high = cpts_read32(cpts, event_high);
                *low = cpts_read32(cpts, event_low);
                cpts_write32(cpts, EVENT_POP, event_pop);
                return 0;
        }
        return -1;
}

static int cpts_purge_events(struct cpts *cpts)
{
        struct list_head *this, *next;
        struct cpts_event *event;
        int removed = 0;

        list_for_each_safe(this, next, &cpts->events) {
                event = list_entry(this, struct cpts_event, list);
                if (event_expired(event)) {
                        list_del_init(&event->list);
                        list_add(&event->list, &cpts->pool);
                        ++removed;
                }
        }

        if (removed)
                dev_dbg(cpts->dev, "cpts: event pool cleaned up %d\n", removed);
        return removed ? 0 : -1;
}

static void cpts_purge_txq(struct cpts *cpts)
{
        struct cpts_skb_cb_data *skb_cb;
        struct sk_buff *skb, *tmp;
        int removed = 0;

        skb_queue_walk_safe(&cpts->txq, skb, tmp) {
                skb_cb = (struct cpts_skb_cb_data *)skb->cb;
                if (time_after(jiffies, skb_cb->tmo)) {
                        __skb_unlink(skb, &cpts->txq);
                        dev_consume_skb_any(skb);
                        ++removed;
                }
        }

        if (removed)
                dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
}

/*
 * Returns zero if matching event type was found.
 */
static int cpts_fifo_read(struct cpts *cpts, int match)
{
        struct cpts_event *event;
        unsigned long flags;
        int i, type = -1;
        u32 hi, lo;

        spin_lock_irqsave(&cpts->lock, flags);

        for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
                if (cpts_fifo_pop(cpts, &hi, &lo))
                        break;

                if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
                        dev_warn(cpts->dev, "cpts: event pool empty\n");
                        break;
                }

                event = list_first_entry(&cpts->pool, struct cpts_event, list);
                event->high = hi;
                event->low = lo;
                event->timestamp = timecounter_cyc2time(&cpts->tc, event->low);
                type = event_type(event);

                dev_dbg(cpts->dev, "CPTS_EV: %d high:%08X low:%08x\n",
                        type, event->high, event->low);

                switch (type) {
                case CPTS_EV_PUSH:
                        WRITE_ONCE(cpts->cur_timestamp, lo);
                        timecounter_read(&cpts->tc);
                        if (cpts->mult_new) {
                                cpts->cc.mult = cpts->mult_new;
                                cpts->mult_new = 0;
                        }
                        break;
                case CPTS_EV_TX:
                case CPTS_EV_RX:
                        event->tmo = jiffies +
                                msecs_to_jiffies(CPTS_EVENT_RX_TX_TIMEOUT);

                        list_del_init(&event->list);
                        list_add_tail(&event->list, &cpts->events);
                        break;
                case CPTS_EV_ROLL:
                case CPTS_EV_HALF:
                case CPTS_EV_HW:
                        break;
                default:
                        dev_err(cpts->dev, "cpts: unknown event type\n");
                        break;
                }
                if (type == match)
                        break;
        }

        spin_unlock_irqrestore(&cpts->lock, flags);

        return type == match ? 0 : -1;
}

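/* Cyclecounter read callback. The CPTS counter cannot be read
 * synchronously, so this returns the snapshot that the most recent
 * TS_PUSH event stored in cpts->cur_timestamp via cpts_fifo_read().
 */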
static u64 cpts_systim_read(const struct cyclecounter *cc)
{
        struct cpts *cpts = container_of(cc, struct cpts, cc);

        return READ_ONCE(cpts->cur_timestamp);
}

static void cpts_update_cur_time(struct cpts *cpts, int match,
                                 struct ptp_system_timestamp *sts)
{
        unsigned long flags;

        /* use spin_lock_irqsave() here as it has to run very fast */
        spin_lock_irqsave(&cpts->lock, flags);
        ptp_read_system_prets(sts);
        cpts_write32(cpts, TS_PUSH, ts_push);
        cpts_read32(cpts, ts_push);
        ptp_read_system_postts(sts);
        spin_unlock_irqrestore(&cpts->lock, flags);

        if (cpts_fifo_read(cpts, match) && match != -1)
                dev_err(cpts->dev, "cpts: unable to obtain a time stamp\n");
}

/* PTP clock operations */

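/* Frequency adjustment: scale the nominal multiplier cc_mult by ppb
 * parts per billion. The new mult value is staged in cpts->mult_new
 * and applied by cpts_fifo_read() when the TS_PUSH event triggered
 * here is popped, so the change lines up with a fresh counter snapshot.
 */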
static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
        struct cpts *cpts = container_of(ptp, struct cpts, info);
        int neg_adj = 0;
        u32 diff, mult;
        u64 adj;

        if (ppb < 0) {
                neg_adj = 1;
                ppb = -ppb;
        }
        mult = cpts->cc_mult;
        adj = mult;
        adj *= ppb;
        diff = div_u64(adj, 1000000000ULL);

        mutex_lock(&cpts->ptp_clk_mutex);

        cpts->mult_new = neg_adj ? mult - diff : mult + diff;

        cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);

        mutex_unlock(&cpts->ptp_clk_mutex);
        return 0;
}

static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        struct cpts *cpts = container_of(ptp, struct cpts, info);

        mutex_lock(&cpts->ptp_clk_mutex);
        timecounter_adjtime(&cpts->tc, delta);
        mutex_unlock(&cpts->ptp_clk_mutex);

        return 0;
}

static int cpts_ptp_gettimeex(struct ptp_clock_info *ptp,
                              struct timespec64 *ts,
                              struct ptp_system_timestamp *sts)
{
        struct cpts *cpts = container_of(ptp, struct cpts, info);
        u64 ns;

        mutex_lock(&cpts->ptp_clk_mutex);

        cpts_update_cur_time(cpts, CPTS_EV_PUSH, sts);

        ns = timecounter_read(&cpts->tc);
        mutex_unlock(&cpts->ptp_clk_mutex);

        *ts = ns_to_timespec64(ns);

        return 0;
}

static int cpts_ptp_settime(struct ptp_clock_info *ptp,
                            const struct timespec64 *ts)
{
        struct cpts *cpts = container_of(ptp, struct cpts, info);
        u64 ns;

        ns = timespec64_to_ns(ts);

        mutex_lock(&cpts->ptp_clk_mutex);
        timecounter_init(&cpts->tc, &cpts->cc, ns);
        mutex_unlock(&cpts->ptp_clk_mutex);

        return 0;
}

static int cpts_ptp_enable(struct ptp_clock_info *ptp,
                           struct ptp_clock_request *rq, int on)
{
        return -EOPNOTSUPP;
}

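/* Try to deliver a TX timestamp event to a queued skb. The match key
 * packs the event type, PTP message type and sequence ID in the same
 * layout that cpts_skb_get_mtype_seqid() builds, so a simple equality
 * test suffices. Returns true if an skb consumed the event.
 */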
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
        struct sk_buff_head txq_list;
        struct sk_buff *skb, *tmp;
        unsigned long flags;
        bool found = false;
        u32 mtype_seqid;

        mtype_seqid = event->high &
                      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
                       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
                       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

        __skb_queue_head_init(&txq_list);

        spin_lock_irqsave(&cpts->txq.lock, flags);
        skb_queue_splice_init(&cpts->txq, &txq_list);
        spin_unlock_irqrestore(&cpts->txq.lock, flags);

        skb_queue_walk_safe(&txq_list, skb, tmp) {
                struct skb_shared_hwtstamps ssh;
                struct cpts_skb_cb_data *skb_cb =
                                        (struct cpts_skb_cb_data *)skb->cb;

                if (mtype_seqid == skb_cb->skb_mtype_seqid) {
                        memset(&ssh, 0, sizeof(ssh));
                        ssh.hwtstamp = ns_to_ktime(event->timestamp);
                        skb_tstamp_tx(skb, &ssh);
                        found = true;
                        __skb_unlink(skb, &txq_list);
                        dev_consume_skb_any(skb);
                        dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
                                mtype_seqid);
                        break;
                }

                if (time_after(jiffies, skb_cb->tmo)) {
                        /* timeout skbs older than CPTS_SKB_RX_TX_TMO */
                        dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
                        __skb_unlink(skb, &txq_list);
                        dev_consume_skb_any(skb);
                }
        }

        spin_lock_irqsave(&cpts->txq.lock, flags);
        skb_queue_splice(&txq_list, &cpts->txq);
        spin_unlock_irqrestore(&cpts->txq.lock, flags);

        return found;
}

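/* Walk all collected events, handing TX timestamps to their skbs and
 * recycling matched or expired events back into the free pool. The
 * lists are spliced out under the lock and processed unlocked.
 */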
static void cpts_process_events(struct cpts *cpts)
{
        struct list_head *this, *next;
        struct cpts_event *event;
        LIST_HEAD(events_free);
        unsigned long flags;
        LIST_HEAD(events);

        spin_lock_irqsave(&cpts->lock, flags);
        list_splice_init(&cpts->events, &events);
        spin_unlock_irqrestore(&cpts->lock, flags);

        list_for_each_safe(this, next, &events) {
                event = list_entry(this, struct cpts_event, list);
                if (cpts_match_tx_ts(cpts, event) ||
                    time_after(jiffies, event->tmo)) {
                        list_del_init(&event->list);
                        list_add(&event->list, &events_free);
                }
        }

        spin_lock_irqsave(&cpts->lock, flags);
        list_splice_tail(&events, &cpts->events);
        list_splice_tail(&events_free, &cpts->pool);
        spin_unlock_irqrestore(&cpts->lock, flags);
}

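/* Periodic PTP aux worker. Pushes a fresh counter snapshot and reads
 * the timecounter often enough that the 32-bit cycle counter never
 * wraps unnoticed, then processes pending events and the TX skb
 * queue. Returns the delay in jiffies until the next run.
 */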
static long cpts_overflow_check(struct ptp_clock_info *ptp)
{
        struct cpts *cpts = container_of(ptp, struct cpts, info);
        unsigned long delay = cpts->ov_check_period;
        unsigned long flags;
        u64 ns;

        mutex_lock(&cpts->ptp_clk_mutex);

        cpts_update_cur_time(cpts, -1, NULL);
        ns = timecounter_read(&cpts->tc);

        cpts_process_events(cpts);

        spin_lock_irqsave(&cpts->txq.lock, flags);
        if (!skb_queue_empty(&cpts->txq)) {
                cpts_purge_txq(cpts);
                if (!skb_queue_empty(&cpts->txq))
                        delay = CPTS_SKB_TX_WORK_TIMEOUT;
        }
        spin_unlock_irqrestore(&cpts->txq.lock, flags);

        dev_dbg(cpts->dev, "cpts overflow check at %lld\n", ns);
        mutex_unlock(&cpts->ptp_clk_mutex);
        return (long)delay;
}

static const struct ptp_clock_info cpts_info = {
        .owner          = THIS_MODULE,
        .name           = "CPTS timer",
        .max_adj        = 1000000,
        .n_ext_ts       = 0,
        .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = cpts_ptp_adjfreq,
        .adjtime        = cpts_ptp_adjtime,
        .gettimex64     = cpts_ptp_gettimeex,
        .settime64      = cpts_ptp_settime,
        .enable         = cpts_ptp_enable,
        .do_aux_work    = cpts_overflow_check,
};

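/* Parse the PTP header of an skb and build the match key: message
 * type and sequence ID packed in the same layout the hardware uses in
 * event_high. Returns 1 if the skb carries a usable PTP message,
 * 0 otherwise.
 */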
static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
        unsigned int ptp_class = ptp_classify_raw(skb);
        u8 *msgtype, *data = skb->data;
        unsigned int offset = 0;
        u16 *seqid;

        if (ptp_class == PTP_CLASS_NONE)
                return 0;

        if (ptp_class & PTP_CLASS_VLAN)
                offset += VLAN_HLEN;

        switch (ptp_class & PTP_CLASS_PMASK) {
        case PTP_CLASS_IPV4:
                offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
                break;
        case PTP_CLASS_IPV6:
                offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
                break;
        case PTP_CLASS_L2:
                offset += ETH_HLEN;
                break;
        default:
                return 0;
        }

        if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
                return 0;

        if (unlikely(ptp_class & PTP_CLASS_V1))
                msgtype = data + offset + OFF_PTP_CONTROL;
        else
                msgtype = data + offset;

        seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
        *mtype_seqid = (*msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
        *mtype_seqid |= (ntohs(*seqid) & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;

        return 1;
}

static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb,
                        int ev_type, u32 skb_mtype_seqid)
{
        struct list_head *this, *next;
        struct cpts_event *event;
        unsigned long flags;
        u32 mtype_seqid;
        u64 ns = 0;

        cpts_fifo_read(cpts, -1);
        spin_lock_irqsave(&cpts->lock, flags);
        list_for_each_safe(this, next, &cpts->events) {
                event = list_entry(this, struct cpts_event, list);
                if (event_expired(event)) {
                        list_del_init(&event->list);
                        list_add(&event->list, &cpts->pool);
                        continue;
                }

                mtype_seqid = event->high &
                              ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
                               (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
                               (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

                if (mtype_seqid == skb_mtype_seqid) {
                        ns = event->timestamp;
                        list_del_init(&event->list);
                        list_add(&event->list, &cpts->pool);
                        break;
                }
        }
        spin_unlock_irqrestore(&cpts->lock, flags);

        return ns;
}

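/* Attach an RX hardware timestamp to an skb if a matching CPTS_EV_RX
 * event is found; otherwise the skb is delivered without one.
 */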
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
        struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
        struct skb_shared_hwtstamps *ssh;
        int ret;
        u64 ns;

        ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
        if (!ret)
                return;

        skb_cb->skb_mtype_seqid |= (CPTS_EV_RX << EVENT_TYPE_SHIFT);

        dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
                __func__, skb_cb->skb_mtype_seqid);

        ns = cpts_find_ts(cpts, skb, CPTS_EV_RX, skb_cb->skb_mtype_seqid);
        if (!ns)
                return;

        ssh = skb_hwtstamps(skb);
        memset(ssh, 0, sizeof(*ssh));
        ssh->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL_GPL(cpts_rx_timestamp);

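/* Queue an outgoing skb for TX timestamping. Matching against the
 * event FIFO is always deferred to the PTP worker, which is kicked
 * immediately (delay 0); the skb reference is held until it matches
 * an event or times out.
 */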
void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
        struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
        int ret;

        if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
                return;

        ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
        if (!ret)
                return;

        skb_cb->skb_mtype_seqid |= (CPTS_EV_TX << EVENT_TYPE_SHIFT);

        dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
                __func__, skb_cb->skb_mtype_seqid);

        /* Always defer TX TS processing to PTP worker */
        skb_get(skb);
        /* get the timestamp for timeouts */
        skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
        skb_queue_tail(&cpts->txq, skb);
        ptp_schedule_worker(cpts->clock, 0);
}
EXPORT_SYMBOL_GPL(cpts_tx_timestamp);

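/* Bring the CPTS block up: seed the event pool, enable the counter
 * and the TS_PEND interrupt, start the timecounter at the current
 * system time, and register the PTP clock that drives the worker.
 */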
int cpts_register(struct cpts *cpts)
{
        int err, i;

        skb_queue_head_init(&cpts->txq);
        INIT_LIST_HEAD(&cpts->events);
        INIT_LIST_HEAD(&cpts->pool);
        for (i = 0; i < CPTS_MAX_EVENTS; i++)
                list_add(&cpts->pool_data[i].list, &cpts->pool);

        clk_enable(cpts->refclk);

        cpts_write32(cpts, CPTS_EN, control);
        cpts_write32(cpts, TS_PEND_EN, int_enable);

        timecounter_init(&cpts->tc, &cpts->cc, ktime_get_real_ns());

        cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
        if (IS_ERR(cpts->clock)) {
                err = PTR_ERR(cpts->clock);
                cpts->clock = NULL;
                goto err_ptp;
        }
        cpts->phc_index = ptp_clock_index(cpts->clock);

        ptp_schedule_worker(cpts->clock, cpts->ov_check_period);

        return 0;

err_ptp:
        clk_disable(cpts->refclk);
        return err;
}
EXPORT_SYMBOL_GPL(cpts_register);

void cpts_unregister(struct cpts *cpts)
{
        if (WARN_ON(!cpts->clock))
                return;

        ptp_clock_unregister(cpts->clock);
        cpts->clock = NULL;

        cpts_write32(cpts, 0, int_enable);
        cpts_write32(cpts, 0, control);

        /* Drop all packets */
        skb_queue_purge(&cpts->txq);

        clk_disable(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_unregister);

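/* Pick cc.mult/cc.shift for the cyclecounter so that
 *
 *      ns = (cycles * mult) >> shift
 *
 * converts refclk cycles to nanoseconds with minimal error over
 * maxsec seconds, unless DT already supplied both values.
 */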
static void cpts_calc_mult_shift(struct cpts *cpts)
{
        u64 frac, maxsec, ns;
        u32 freq;

        freq = clk_get_rate(cpts->refclk);

        /* Calc the maximum number of seconds which we can run before
         * wrapping around.
         */
        maxsec = cpts->cc.mask;
        do_div(maxsec, freq);
        /* limit conversion rate to 10 sec as higher values will produce
         * too small mult factors and so reduce the conversion accuracy
         */
        if (maxsec > 10)
                maxsec = 10;

        /* Calc overflow check period (maxsec / 2) */
        cpts->ov_check_period = (HZ * maxsec) / 2;
        dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
                 cpts->ov_check_period);

        if (cpts->cc.mult || cpts->cc.shift)
                return;

        clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
                               freq, NSEC_PER_SEC, maxsec);

        frac = 0;
        ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);

        dev_info(cpts->dev,
                 "CPTS: ref_clk_freq:%u calc_mult:%u calc_shift:%u error:%lld nsec/sec\n",
                 freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}

static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
{
        struct device_node *refclk_np;
        const char **parent_names;
        unsigned int num_parents;
        struct clk_hw *clk_hw;
        int ret = -EINVAL;
        u32 *mux_table;

        refclk_np = of_get_child_by_name(node, "cpts-refclk-mux");
        if (!refclk_np)
                /* refclk selection is not supported on all SoCs */
                return 0;

        num_parents = of_clk_get_parent_count(refclk_np);
        if (num_parents < 1) {
                dev_err(cpts->dev, "mux-clock %s must have parents\n",
                        refclk_np->name);
                goto mux_fail;
        }

        parent_names = devm_kzalloc(cpts->dev, (sizeof(char *) * num_parents),
                                    GFP_KERNEL);
        mux_table = devm_kzalloc(cpts->dev, sizeof(*mux_table) * num_parents,
                                 GFP_KERNEL);
        if (!mux_table || !parent_names) {
                ret = -ENOMEM;
                goto mux_fail;
        }

        of_clk_parent_fill(refclk_np, parent_names, num_parents);

        ret = of_property_read_variable_u32_array(refclk_np, "ti,mux-tbl",
                                                  mux_table,
                                                  num_parents, num_parents);
        if (ret < 0)
                goto mux_fail;

        clk_hw = clk_hw_register_mux_table(cpts->dev, refclk_np->name,
                                           parent_names, num_parents,
                                           0,
                                           &cpts->reg->rftclk_sel, 0, 0x1F,
                                           0, mux_table, NULL);
        if (IS_ERR(clk_hw)) {
                ret = PTR_ERR(clk_hw);
                goto mux_fail;
        }

        ret = devm_add_action_or_reset(cpts->dev,
                                       (void(*)(void *))clk_hw_unregister_mux,
                                       clk_hw);
        if (ret) {
                dev_err(cpts->dev, "add clkmux unreg action %d", ret);
                goto mux_fail;
        }

        ret = of_clk_add_hw_provider(refclk_np, of_clk_hw_simple_get, clk_hw);
        if (ret)
                goto mux_fail;

        ret = devm_add_action_or_reset(cpts->dev,
                                       (void(*)(void *))of_clk_del_provider,
                                       refclk_np);
        if (ret) {
                dev_err(cpts->dev, "add clkmux provider unreg action %d", ret);
                goto mux_fail;
        }

        return ret;

mux_fail:
        of_node_put(refclk_np);
        return ret;
}

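/* DT properties consumed below (illustrative fragment only; the
 * property names are the ones used in this file, the values are
 * examples, not defaults):
 *
 *      cpts_clock_mult = <0x80000000>;
 *      cpts_clock_shift = <29>;
 *      cpts-refclk-mux { ... };        [see cpts_of_mux_clk_setup()]
 *
 * mult and shift must be given together; if both are absent they are
 * calculated from the refclk rate in cpts_calc_mult_shift().
 */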
static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
{
        int ret = -EINVAL;
        u32 prop;

        if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
                cpts->cc.mult = prop;

        if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
                cpts->cc.shift = prop;

        if ((cpts->cc.mult && !cpts->cc.shift) ||
            (!cpts->cc.mult && cpts->cc.shift))
                goto of_error;

        return cpts_of_mux_clk_setup(cpts, node);

of_error:
        dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
        return ret;
}

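/* Expected lifecycle for users of this API: cpts_create() followed by
 * cpts_register() on bring-up, cpts_unregister() then cpts_release()
 * on teardown. cpts_create() only parses DT and prepares the refclk;
 * the hardware is not touched until cpts_register().
 */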
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
                         struct device_node *node)
{
        struct cpts *cpts;
        int ret;

        cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
        if (!cpts)
                return ERR_PTR(-ENOMEM);

        cpts->dev = dev;
        cpts->reg = (struct cpsw_cpts __iomem *)regs;
        spin_lock_init(&cpts->lock);
        mutex_init(&cpts->ptp_clk_mutex);

        ret = cpts_of_parse(cpts, node);
        if (ret)
                return ERR_PTR(ret);

        cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
        if (IS_ERR(cpts->refclk))
                /* try get clk from dev node for compatibility */
                cpts->refclk = devm_clk_get(dev, "cpts");

        if (IS_ERR(cpts->refclk)) {
                dev_err(dev, "Failed to get cpts refclk %ld\n",
                        PTR_ERR(cpts->refclk));
                return ERR_CAST(cpts->refclk);
        }

        ret = clk_prepare(cpts->refclk);
        if (ret)
                return ERR_PTR(ret);

        cpts->cc.read = cpts_systim_read;
        cpts->cc.mask = CLOCKSOURCE_MASK(32);
        cpts->info = cpts_info;

        cpts_calc_mult_shift(cpts);
        /* save cc.mult original value as it can be modified
         * by cpts_ptp_adjfreq().
         */
        cpts->cc_mult = cpts->cc.mult;

        return cpts;
}
EXPORT_SYMBOL_GPL(cpts_create);

void cpts_release(struct cpts *cpts)
{
        if (!cpts)
                return;

        if (WARN_ON(!cpts->refclk))
                return;

        clk_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_release);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI CPTS driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");