// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000
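
/* Per-device counters, updated from the MHI transfer callbacks and read
 * under u64_stats_sync so 64-bit values stay consistent on 32-bit hosts.
 */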
struct mhi_net_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t rx_dropped;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	atomic_t rx_queued;
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
};

struct mhi_net_dev {
	struct mhi_device *mdev;
	struct net_device *ndev;
	struct delayed_work rx_refill;
	struct mhi_net_stats stats;
	u32 rx_queue_sz;
};

static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}
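
/* Hand the skb straight to the MHI uplink channel. The buffer is owned by
 * the MHI core until mhi_net_ul_callback() fires, so it is freed there on
 * completion, or immediately here if queueing fails.
 */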
static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);

		u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
		u64_stats_inc(&mhi_netdev->stats.tx_dropped);
		u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

		dev_kfree_skb_any(skb);
	}

	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}
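
/* Snapshot the counters under the u64_stats seqcount, retrying if one of
 * the transfer callbacks updated them mid-read.
 */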
static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
		stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
}

static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open = mhi_ndo_open,
	.ndo_stop = mhi_ndo_stop,
	.ndo_start_xmit = mhi_ndo_xmit,
	.ndo_get_stats64 = mhi_ndo_get_stats64,
};
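
/* Raw IP/QMAP link: no L2 header, no ARP, point-to-point only */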
static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_NONE; /* QMAP... */
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}
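
/* Downlink (device to host) completion: one call per RX buffer the device
 * has filled, in MHI callback context.
 */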
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int remaining;

	remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);

	if (unlikely(mhi_res->transaction_status)) {
		dev_kfree_skb_any(skb);

		/* MHI layer stopping/resetting the DL channel */
		if (mhi_res->transaction_status == -ENOTCONN)
			return;

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_errors);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
	} else {
		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);

		skb->protocol = htons(ETH_P_MAP);
		skb_put(skb, mhi_res->bytes_xferd);
		netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (remaining <= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}
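
/* Uplink (host to device) completion: release the TX skb, account it, and
 * restart the queue if mhi_ndo_xmit() had flow-controlled it.
 */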
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}
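
/* Keep the DL channel fed with MTU-sized skbs, up to rx_queue_sz buffers
 * in flight at a time.
 */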
static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	int size = READ_ONCE(ndev->mtu);
	struct sk_buff *skb;
	int err;

	while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		atomic_inc(&mhi_netdev->stats.rx_queued);

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}
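
/* One netdev is created per matched MHI channel pair; the RX pool is sized
 * to the channel ring and the MHI channels are started before the netdev
 * is registered.
 */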
static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const char *netname = (char *)id->driver_data;
	struct device *dev = &mhi_dev->dev;
	struct mhi_net_dev *mhi_netdev;
	struct net_device *ndev;
	int err;

	ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE,
			    mhi_net_setup);
	if (!ndev)
		return -ENOMEM;

	mhi_netdev = netdev_priv(ndev);
	dev_set_drvdata(dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	SET_NETDEV_DEV(ndev, &mhi_dev->dev);

	/* All MHI net channels have 128 ring elements (at least for now) */
	mhi_netdev->rx_queue_sz = 128;

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		goto out_err;

	err = register_netdev(ndev);
	if (err)
		goto out_err;

	return 0;

out_err:
	free_netdev(ndev);
	return err;
}

static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	unregister_netdev(mhi_netdev->ndev);

	mhi_unprepare_from_transfer(mhi_netdev->mdev);

	free_netdev(mhi_netdev->ndev);
}
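
/* Match on MHI channel name; driver_data holds the interface name pattern */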
static const struct mhi_device_id mhi_net_id_table[] = {
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)"mhi_hwip%d" },
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
		.owner = THIS_MODULE,
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");