// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include <linux/wwan.h>

#include "mhi.h"

#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000

/* When set to false, the default netdev (link 0) is not created, and it's up
 * to the user to create the link (via wwan rtnetlink).
 */
static bool create_default_iface = true;
module_param(create_default_iface, bool, 0);
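
/* Illustrative usage (assumption, not part of this driver): when loaded with
 * create_default_iface=0, data links can be created from user space through
 * the WWAN rtnetlink kind, e.g. with a recent iproute2:
 *   ip link add dev wwan0-0 parentdev wwan0 type wwan linkid 0
 */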

struct mhi_device_info {
	const char *netname;
	const struct mhi_net_proto *proto;
};

static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}
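
/* Queue one packet on the MHI UL (host-to-device) channel. The skb is freed
 * in mhi_net_ul_callback() once the device has consumed it; on any drop the
 * tx_dropped counter is updated instead.
 */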
static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
	const struct mhi_net_proto *proto = mhi_netdev->proto;
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	if (proto && proto->tx_fixup) {
		skb = proto->tx_fixup(mhi_netdev, skb);
		if (unlikely(!skb))
			goto exit_drop;
	}

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);
		dev_kfree_skb_any(skb);
		goto exit_drop;
	}

	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;

exit_drop:
	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	u64_stats_inc(&mhi_netdev->stats.tx_dropped);
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	return NETDEV_TX_OK;
}

static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
		stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
		stats->rx_length_errors = u64_stats_read(&mhi_netdev->stats.rx_length_errors);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
}

static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open		= mhi_ndo_open,
	.ndo_stop		= mhi_ndo_stop,
	.ndo_start_xmit		= mhi_ndo_xmit,
	.ndo_get_stats64	= mhi_ndo_get_stats64,
};

static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_RAWIP;
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}
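
/* Chain the fragments of an oversized packet into a single skb: the first
 * fragment becomes the aggregation head and the following ones are linked
 * through the head's frag_list (non-paged chaining).
 */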
static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
				       struct sk_buff *skb)
{
	struct sk_buff *head = mhi_netdev->skbagg_head;
	struct sk_buff *tail = mhi_netdev->skbagg_tail;

	/* This is non-paged skb chaining using frag_list */
	if (!head) {
		mhi_netdev->skbagg_head = skb;
		return skb;
	}

	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		tail->next = skb;

	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;

	mhi_netdev->skbagg_tail = skb;

	return mhi_netdev->skbagg_head;
}
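
/* DL (device-to-host) transfer completion: account the received buffer,
 * re-aggregate oversized packets if needed, set skb->protocol from the IP
 * version nibble and hand the packet to proto->rx() or netif_rx(). Also
 * kicks the RX refill work when the descriptor ring runs low.
 */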
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	const struct mhi_net_proto *proto = mhi_netdev->proto;
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		switch (mhi_res->transaction_status) {
		case -EOVERFLOW:
			/* Packet can not fit in one MHI buffer and has been
			 * split over multiple MHI transfers, do re-aggregation.
			 * That usually means the device side MTU is larger than
			 * the host side MTU/MRU. Since this is not optimal,
			 * print a warning (once).
			 */
			netdev_warn_once(mhi_netdev->ndev,
					 "Fragmented packets received, fix MTU?\n");
			skb_put(skb, mhi_res->bytes_xferd);
			mhi_net_skb_agg(mhi_netdev, skb);
			break;
		case -ENOTCONN:
			/* MHI layer stopping/resetting the DL channel */
			dev_kfree_skb_any(skb);
			return;
		default:
			/* Unknown error, simply drop */
			dev_kfree_skb_any(skb);
			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
			u64_stats_inc(&mhi_netdev->stats.rx_errors);
			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		}
	} else {
		skb_put(skb, mhi_res->bytes_xferd);

		if (mhi_netdev->skbagg_head) {
			/* Aggregate the final fragment */
			skb = mhi_net_skb_agg(mhi_netdev, skb);
			mhi_netdev->skbagg_head = NULL;
		}

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);

		switch (skb->data[0] & 0xf0) {
		case 0x40:
			skb->protocol = htons(ETH_P_IP);
			break;
		case 0x60:
			skb->protocol = htons(ETH_P_IPV6);
			break;
		default:
			skb->protocol = htons(ETH_P_MAP);
			break;
		}

		if (proto && proto->rx)
			proto->rx(mhi_netdev, skb);
		else
			netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}
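
/* UL (host-to-device) transfer completion: the device has consumed the
 * buffer, so free the skb, update TX stats and wake the queue once
 * descriptors are available again.
 */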
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}
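
/* Refill the DL ring with receive buffers (MRU-sized, or MTU-sized when no
 * MRU is set) until it is full; if the ring is still completely empty
 * afterwards, the work reschedules itself.
 */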
static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);

	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}
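
/* Set up and register link context 0, either for the default interface
 * created at probe time or on behalf of the WWAN rtnetlink layer (a non-NULL
 * extack means the caller already holds the rtnl lock).
 */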
static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
			   struct netlink_ext_ack *extack)
{
	const struct mhi_device_info *info;
	struct mhi_device *mhi_dev = ctxt;
	struct mhi_net_dev *mhi_netdev;
	int err;

	info = (struct mhi_device_info *)mhi_dev->id->driver_data;

	/* For now we only support one link (link context 0), driver must be
	 * reworked to break 1:1 relationship for net MBIM and to forward setup
	 * call to rmnet(QMAP) otherwise.
	 */
	if (if_id != 0)
		return -EINVAL;

	if (dev_get_drvdata(&mhi_dev->dev))
		return -EBUSY;

	mhi_netdev = wwan_netdev_drvpriv(ndev);

	dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	mhi_netdev->skbagg_head = NULL;
	mhi_netdev->proto = info->proto;

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		goto out_err;

	/* Number of transfer descriptors determines size of the queue */
	mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (extack)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err)
		goto out_err;

	if (mhi_netdev->proto) {
		err = mhi_netdev->proto->init(mhi_netdev);
		if (err)
			goto out_err_proto;
	}

	return 0;

out_err_proto:
	unregister_netdevice(ndev);
out_err:
	/* Freeing the netdev is left to the caller (probe or rtnl core) */
	dev_set_drvdata(&mhi_dev->dev, NULL);
	return err;
}

static void mhi_net_dellink(void *ctxt, struct net_device *ndev,
			    struct list_head *head)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
	struct mhi_device *mhi_dev = ctxt;

	if (head)
		unregister_netdevice_queue(ndev, head);
	else
		unregister_netdev(ndev);

	mhi_unprepare_from_transfer(mhi_dev);

	kfree_skb(mhi_netdev->skbagg_head);

	dev_set_drvdata(&mhi_dev->dev, NULL);
}
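
/* Link management operations exposed to the WWAN core (wwan rtnetlink) */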
static const struct wwan_ops mhi_wwan_ops = {
	.priv_size = sizeof(struct mhi_net_dev),
	.setup = mhi_net_setup,
	.newlink = mhi_net_newlink,
	.dellink = mhi_net_dellink,
};

static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
	struct net_device *ndev;
	int err;

	err = wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_wwan_ops, mhi_dev,
				WWAN_NO_DEFAULT_LINK);
	if (err)
		return err;

	if (!create_default_iface)
		return 0;

	/* Create a default interface which is used as either RMNET real-dev,
	 * MBIM link 0 or ip link 0
	 */
	ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname,
			    NET_NAME_PREDICTABLE, mhi_net_setup);
	if (!ndev) {
		err = -ENOMEM;
		goto err_unregister;
	}

	SET_NETDEV_DEV(ndev, &mhi_dev->dev);

	err = mhi_net_newlink(mhi_dev, ndev, 0, NULL);
	if (err)
		goto err_release;

	return 0;

err_release:
	free_netdev(ndev);
err_unregister:
	wwan_unregister_ops(&cntrl->mhi_dev->dev);

	return err;
}

static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;

	/* WWAN core takes care of removing remaining links */
	wwan_unregister_ops(&cntrl->mhi_dev->dev);

	if (create_default_iface)
		mhi_net_dellink(mhi_dev, mhi_netdev->ndev, NULL);
}

static const struct mhi_device_info mhi_hwip0 = {
	.netname = "mhi_hwip%d",
};

static const struct mhi_device_info mhi_swip0 = {
	.netname = "mhi_swip%d",
};

static const struct mhi_device_info mhi_hwip0_mbim = {
	.netname = "mhi_mbim%d",
	.proto = &proto_mbim,
};

static const struct mhi_device_id mhi_net_id_table[] = {
	/* Hardware accelerated data PATH (to modem IPA), protocol agnostic */
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 },
	/* Software data PATH (to modem CPU) */
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 },
	/* Hardware accelerated data PATH (to modem IPA), MBIM protocol */
	{ .chan = "IP_HW0_MBIM", .driver_data = (kernel_ulong_t)&mhi_hwip0_mbim },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
		.owner = THIS_MODULE,
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");