// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */
#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

#include "mhi.h"
#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000
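
/* Per-channel match data, referenced from the mhi_device_id table below:
 * a netdev name template plus optional protocol hooks (e.g. MBIM) layered
 * on top of the raw MHI channel.
 */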
struct mhi_device_info {
	const char *netname;
	const struct mhi_net_proto *proto;
};
static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}
static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}
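
/* Note: mhi_ndo_xmit() below returns NETDEV_TX_OK even when a packet is
 * dropped; drops are accounted in tx_dropped rather than asking the stack
 * to requeue the skb.
 */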
static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	const struct mhi_net_proto *proto = mhi_netdev->proto;
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	if (proto && proto->tx_fixup) {
		skb = proto->tx_fixup(mhi_netdev, skb);
		if (unlikely(!skb))
			goto exit_drop;
	}

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);
		dev_kfree_skb_any(skb);
		goto exit_drop;
	}

	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;

exit_drop:
	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	u64_stats_inc(&mhi_netdev->stats.tx_dropped);
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	return NETDEV_TX_OK;
}
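
/* Snapshot the per-direction counters. The fetch/retry loop re-reads the
 * block if a writer bumped the u64_stats seqcount mid-read (it only
 * actually retries on 32-bit SMP kernels; on 64-bit it is a no-op).
 */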
static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
		stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
		stats->rx_length_errors = u64_stats_read(&mhi_netdev->stats.rx_length_errors);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
}
static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open = mhi_ndo_open,
	.ndo_stop = mhi_ndo_stop,
	.ndo_start_xmit = mhi_ndo_xmit,
	.ndo_get_stats64 = mhi_ndo_get_stats64,
};
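
/* ARPHRD_RAWIP + IFF_NOARP below: frames carry bare IP packets with no
 * link-layer header, so the interface has no MAC address and no ARP.
 */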
static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_RAWIP;
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}
static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
				       struct sk_buff *skb)
{
	struct sk_buff *head = mhi_netdev->skbagg_head;
	struct sk_buff *tail = mhi_netdev->skbagg_tail;

	/* This is non-paged skb chaining using frag_list */
	if (!head) {
		mhi_netdev->skbagg_head = skb;
		goto out;
	}

	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		tail->next = skb;

	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;

out:
	mhi_netdev->skbagg_tail = skb;

	return mhi_netdev->skbagg_head;
}
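
/* Resulting aggregate after e.g. three fragments:
 *
 *	head (1st fragment, linear data)
 *	  skb_shinfo(head)->frag_list -> 2nd fragment
 *	                                2nd->next -> 3rd fragment (tail)
 *
 * head->len/data_len/truesize cover the whole chain, so it can later be
 * passed up the stack as a single packet.
 */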
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	const struct mhi_net_proto *proto = mhi_netdev->proto;
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		switch (mhi_res->transaction_status) {
		case -EOVERFLOW:
			/* Packet can not fit in one MHI buffer and has been
			 * split over multiple MHI transfers, do re-aggregation.
			 * That usually means the device side MTU is larger than
			 * the host side MTU/MRU. Since this is not optimal,
			 * print a warning (once).
			 */
			netdev_warn_once(mhi_netdev->ndev,
					 "Fragmented packets received, fix MTU?\n");
			skb_put(skb, mhi_res->bytes_xferd);
			mhi_net_skb_agg(mhi_netdev, skb);
			break;
		case -ENOTCONN:
			/* MHI layer stopping/resetting the DL channel */
			dev_kfree_skb_any(skb);
			return;
		default:
			/* Unknown error, simply drop */
			dev_kfree_skb_any(skb);
			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
			u64_stats_inc(&mhi_netdev->stats.rx_errors);
			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		}
	} else {
		skb_put(skb, mhi_res->bytes_xferd);

		if (mhi_netdev->skbagg_head) {
			/* Aggregate the final fragment */
			skb = mhi_net_skb_agg(mhi_netdev, skb);
			mhi_netdev->skbagg_head = NULL;
		}

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);

		/* Raw IP link: infer the protocol from the IP version nibble,
		 * anything else is assumed to be QMAP muxing (ETH_P_MAP).
		 */
		switch (skb->data[0] & 0xf0) {
		case 0x40:
			skb->protocol = htons(ETH_P_IP);
			break;
		case 0x60:
			skb->protocol = htons(ETH_P_IPV6);
			break;
		default:
			skb->protocol = htons(ETH_P_MAP);
			break;
		}

		if (proto && proto->rx)
			proto->rx(mhi_netdev, skb);
		else
			netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}
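
/* UL (uplink, host to device) completion: invoked by the MHI core once the
 * device has consumed a TX buffer queued by mhi_ndo_xmit().
 */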
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}
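
/* RX buffers must be pre-posted on the DL channel for the device to fill;
 * completions then arrive through mhi_net_dl_callback() above.
 */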
static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);

	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}
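
/* The "wwan" device type shows up in the netdev uevent/sysfs (DEVTYPE=wwan),
 * letting userspace tell modem data ports apart from ordinary NICs.
 */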
static struct device_type wwan_type = {
	.name = "wwan",
};
static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
	struct device *dev = &mhi_dev->dev;
	struct mhi_net_dev *mhi_netdev;
	struct net_device *ndev;
	int err;

	ndev = alloc_netdev(sizeof(*mhi_netdev), info->netname,
			    NET_NAME_PREDICTABLE, mhi_net_setup);
	if (!ndev)
		return -ENOMEM;

	mhi_netdev = netdev_priv(ndev);
	dev_set_drvdata(dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	mhi_netdev->skbagg_head = NULL;
	mhi_netdev->proto = info->proto;
	SET_NETDEV_DEV(ndev, &mhi_dev->dev);
	SET_NETDEV_DEVTYPE(ndev, &wwan_type);

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		goto out_err;

	/* Number of transfer descriptors determines size of the queue */
	mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	err = register_netdev(ndev);
	if (err)
		goto out_err;

	if (mhi_netdev->proto) {
		err = mhi_netdev->proto->init(mhi_netdev);
		if (err)
			goto out_err_proto;
	}

	return 0;

out_err_proto:
	unregister_netdev(ndev);
out_err:
	free_netdev(ndev);
	return err;
}
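
/* Teardown mirrors probe: unregister the netdev first so no new I/O can be
 * submitted, then reset the MHI channels, then drop any half-aggregated skb.
 */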
static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	unregister_netdev(mhi_netdev->ndev);

	mhi_unprepare_from_transfer(mhi_netdev->mdev);

	kfree_skb(mhi_netdev->skbagg_head);

	free_netdev(mhi_netdev->ndev);
}
static const struct mhi_device_info mhi_hwip0 = {
	.netname = "mhi_hwip%d",
};

static const struct mhi_device_info mhi_swip0 = {
	.netname = "mhi_swip%d",
};

static const struct mhi_device_info mhi_hwip0_mbim = {
	.netname = "mhi_mbim%d",
	.proto = &proto_mbim,
};
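
/* A protocol layer plugs in through struct mhi_net_proto (declared in the
 * local "mhi.h"); from its use in this file it carries at least .init, .rx
 * and .tx_fixup hooks. A minimal sketch, with hypothetical "foo" names:
 *
 *	static const struct mhi_net_proto proto_foo = {
 *		.init     = foo_init,      // called once after register_netdev()
 *		.rx       = foo_rx,        // demux/deliver a received DL skb
 *		.tx_fixup = foo_tx_fixup,  // prepend protocol framing on TX
 *	};
 *
 * Only proto_mbim is actually wired up in this file.
 */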
static const struct mhi_device_id mhi_net_id_table[] = {
	/* Hardware accelerated data PATH (to modem IPA), protocol agnostic */
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 },
	/* Software data PATH (to modem CPU) */
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 },
	/* Hardware accelerated data PATH (to modem IPA), MBIM protocol */
	{ .chan = "IP_HW0_MBIM", .driver_data = (kernel_ulong_t)&mhi_hwip0_mbim },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);
static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
		.owner = THIS_MODULE,
	},
};
module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");