/*
 * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"

static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, S_IRUGO);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");

bool rx_align_2;
module_param(rx_align_2, bool, S_IRUGO);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");

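/* Usage sketch (illustrative): both knobs are read-only (S_IRUGO) module
 * parameters, set at load time, e.g.
 *	modprobe wil6210 rx_align_2=1
 * and visible under /sys/module/wil6210/parameters/.
 */
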
static inline uint wil_rx_snaplen(void)
{
	return rx_align_2 ? 6 : 0;
}

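/* Illustrative note (not authoritative): with rx_align_2 set, Rx buffers
 * start on a 4*n+2 address and HW places a 6-byte SNAP between the MAC
 * addresses and the ethertype; the Rx buffer is sized for it (see
 * wil_vring_alloc_skb()) and wil_vring_reap_rx() strips it again.
 */
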
static inline int wil_vring_is_empty(struct vring *vring)
{
	return vring->swhead == vring->swtail;
}

static inline u32 wil_vring_next_tail(struct vring *vring)
{
	return (vring->swtail + 1) % vring->size;
}

static inline void wil_vring_advance_head(struct vring *vring, int n)
{
	vring->swhead = (vring->swhead + n) % vring->size;
}

static inline int wil_vring_is_full(struct vring *vring)
{
	return wil_vring_next_tail(vring) == vring->swhead;
}

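/* Ring invariant, by example: this is the classic circular buffer that
 * keeps one slot unused, so swhead == swtail unambiguously means "empty"
 * while next_tail == swhead means "full"; a ring of size 1024 therefore
 * holds at most 1023 descriptors.
 */
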
/* Used space in Tx Vring */
static inline int wil_vring_used_tx(struct vring *vring)
{
	u32 swhead = vring->swhead;
	u32 swtail = vring->swtail;

	return (vring->size + swhead - swtail) % vring->size;
}

/* Available space in Tx Vring */
static inline int wil_vring_avail_tx(struct vring *vring)
{
	return vring->size - wil_vring_used_tx(vring) - 1;
}

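/* Worked example: size = 128, swhead = 5, swtail = 120 gives
 *	used  = (128 + 5 - 120) % 128 = 13 descriptors in flight,
 *	avail = 128 - 13 - 1 = 114 slots free for new fragments.
 */
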
/* wil_vring_wmark_low - low watermark for available descriptor space */
static inline int wil_vring_wmark_low(struct vring *vring)
{
	return vring->size / 8;
}

/* wil_vring_wmark_high - high watermark for available descriptor space */
static inline int wil_vring_wmark_high(struct vring *vring)
{
	return vring->size / 4;
}

/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
	return val >= min && val < max;
}

static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "%s()\n", __func__);

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}
	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent
	 */
	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}
	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &vring->va[i].tx;

		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}

static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
			     struct wil_ctx *ctx)
{
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	if (tx) {
		int vring_index = vring - wil->vring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}

/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &vring->va[i].rx;
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}

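/* Ownership hand-off sketch: status = 0 above clears the DU ("descriptor
 * used") bit, so once *_d = *d lands in shared memory the descriptor is
 * owned by HW; HW hands it back by setting RX_DMA_STATUS_DU, which is
 * exactly what wil_vring_reap_rx() polls for before touching a slot.
 */
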
/**
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	static char phy_data[128];
	struct ieee80211_channel *ch = wdev->preset_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/**
			 * PHY info starts from 8-byte boundary
			 * there are 8-byte lines, last line may be partially
			 * written (HW bug), thus FW configures for last line
			 * to be excessive. Driver skips this last line.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));

			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);

				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}

	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
		return;
	}

	rtap_vendor = (void *)skb_push(skb, rtap_len);
	memset(rtap_vendor, 0, rtap_len);

	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;

	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);

	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap_vendor->rtap.mcs_flags = 0;
	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);

	if (rtap_include_phy_info) {
		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
		/* OUI for Wilocity 04:ce:14 */
		rtap_vendor->vendor_oui[0] = 0x04;
		rtap_vendor->vendor_oui[1] = 0xce;
		rtap_vendor->vendor_oui[2] = 0x14;
		rtap_vendor->vendor_ns = 1;
		/* Rx descriptor + PHY data */
		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
						       phy_length);
		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
		       phy_length);
	}
}

/**
 * reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int snaplen = wil_rx_snaplen();
	unsigned int sz = mtu_max + ETH_HLEN + snaplen;
	u16 dmalen;
	u8 ftype;
	int cid;
	int i = (int)vring->swhead;
	struct wil_net_stats *stats;

	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));

	if (unlikely(wil_vring_is_empty(vring)))
		return NULL;

	_d = &vring->va[i].rx;
	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
		/* it is not error, we just reached end of Rx done area */
		return NULL;
	}

	skb = vring->ctx[i].skb;
	vring->ctx[i].skb = NULL;
	wil_vring_advance_head(vring, 1);
	if (!skb) {
		wil_err(wil, "No Rx skb at [%d]\n", i);
		return NULL;
	}
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(i, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		kfree_skb(skb);
		return NULL;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	cid = wil_rxdesc_cid(d);
	stats = &wil->sta[cid].stats;
	stats->last_mcs_rx = wil_rxdesc_mcs(d);
	if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
		stats->rx_per_mcs[stats->last_mcs_rx]++;

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
	 * Driver should recognize it by frame type, that is found
	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
		wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
		/* TODO: process it */
		kfree_skb(skb);
		return NULL;
	}

	if (unlikely(skb->len < ETH_HLEN + snaplen)) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		/* TODO: process it (i.e. BAR) */
		kfree_skb(skb);
		return NULL;
	}

	/* L4 IDENT is on when HW calculated checksum, check status
	 * and in case of error drop the packet
	 * higher stack layers will handle retransmission (if required)
	 */
	if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
		/* L4 protocol identified, csum calculated */
		if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports bad checksum, let IP stack re-check it
		 * For example, HW doesn't understand Microsoft IP stack that
		 * mis-calculates TCP checksum - if it should be 0x0,
		 * it writes 0xffff in violation of RFC 1624
		 */
	}

	if (snaplen) {
		/* Packet layout
		 * +-------+-------+---------+------------+------+
		 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
		 * +-------+-------+---------+------------+------+
		 * Need to remove SNAP, shifting SA and DA forward
		 */
		memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, snaplen);
	}

	return skb;
}

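/* SNAP-strip example (snaplen = 6): the memmove above copies the two
 * 6-byte address fields forward over the SNAP, turning
 * |SA|DA|SNAP|ETHTYPE|DATA| into |..|SA|DA|ETHTYPE|DATA|, and skb_pull()
 * then drops the 6 stale bytes left at the front.
 */
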
/**
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_vring_next_tail(v),
			(next_tail != v->swhead) && (count-- > 0);
			v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (unlikely(rc)) {
			wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
				rc, v->swtail);
			break;
		}
	}
	wil_w(wil, v->hwtail, v->swtail);

	return rc;
}

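/* Refill sketch: the for-loop posts buffers from swtail forward and stops
 * one slot short of swhead (the ring's keep-one-empty rule) or after
 * @count buffers; the single wil_w() afterwards publishes the new tail
 * to HW once, instead of once per buffer.
 */
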
/**
 * Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	gro_result_t rc = GRO_NORMAL;
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct wireless_dev *wdev = wil_to_wdev(wil);
	unsigned int len = skb->len;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
	struct ethhdr *eth = (void *)skb->data;
	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
	 * is not suitable, need to look at data
	 */
	int mcast = is_multicast_ether_addr(eth->h_dest);
	struct wil_net_stats *stats = &wil->sta[cid].stats;
	struct sk_buff *xmit_skb = NULL;
	static const char * const gro_res_str[] = {
		[GRO_MERGED]		= "GRO_MERGED",
		[GRO_MERGED_FREE]	= "GRO_MERGED_FREE",
		[GRO_HELD]		= "GRO_HELD",
		[GRO_NORMAL]		= "GRO_NORMAL",
		[GRO_DROP]		= "GRO_DROP",
	};

	if (ndev->features & NETIF_F_RXHASH)
		/* fake L4 to ensure it won't be re-calculated later
		 * set hash to any non-zero value to activate rps
		 * mechanism, core will be chosen according
		 * to user-level rps configuration.
		 */
		skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);

	skb_orphan(skb);

	if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
		if (mcast) {
			/* send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			int xmit_cid = wil_find_cid(wil, eth->h_dest);

			if (xmit_cid >= 0) {
				/* The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}
	if (xmit_skb) {
		/* Send to wireless media and increase priority by 256 to
		 * keep the received priority instead of reclassifying
		 * the frame (see cfg80211_classify8021d).
		 */
		xmit_skb->dev = ndev;
		xmit_skb->priority += 256;
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
		dev_queue_xmit(xmit_skb);
	}

	if (skb) { /* deliver to local stack */
		skb->protocol = eth_type_trans(skb, ndev);
		rc = napi_gro_receive(&wil->napi_rx, skb);
		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
			     len, gro_res_str[rc]);
	}
	/* statistics. rc set to GRO_NORMAL for AP bridging */
	if (unlikely(rc == GRO_DROP)) {
		ndev->stats.rx_dropped++;
		stats->rx_dropped++;
		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
	} else {
		ndev->stats.rx_packets++;
		stats->rx_packets++;
		ndev->stats.rx_bytes += len;
		stats->rx_bytes += len;
		if (mcast)
			ndev->stats.multicast++;
	}
}

/**
 * Process all completed skb's from Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	struct sk_buff *skb;

	if (unlikely(!v->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "%s()\n", __func__);
	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
		(*quota)--;

		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			wil_netif_rx_any(skb, ndev);
		} else {
			wil_rx_reorder(wil, skb);
		}
	}
	wil_rx_refill(wil, v->size);
}

int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
	struct vring *vring = &wil->vring_rx;
	int rc;

	wil_dbg_misc(wil, "%s()\n", __func__);

	if (vring->va) {
		wil_err(wil, "Rx ring already allocated\n");
		return -EINVAL;
	}

	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
 err_free:
	wil_vring_free(wil, vring, 0);

	return rc;
}

void wil_rx_fini(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;

	wil_dbg_misc(wil, "%s()\n", __func__);

	if (vring->va)
		wil_vring_free(wil, vring, 0);
}

int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.cidxtid = mk_cidxtid(cid, tid),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	memset(txdata, 0, sizeof(*txdata));
	spin_lock_init(&txdata->lock);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = cid;
	wil->vring2cid_tid[id][1] = tid;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!wil->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	txdata->enabled = 1;
	if (txdata->dot1x_open && (agg_wsize >= 0))
		wil_addba_tx_request(wil, id, agg_wsize);

	return 0;
 out_free:
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	wil_vring_free(wil, vring, 1);
 out:

	return rc;
}

int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
{
	int rc;
	struct wmi_bcast_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
		},
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	memset(txdata, 0, sizeof(*txdata));
	spin_lock_init(&txdata->lock);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
	wil->vring2cid_tid[id][1] = 0; /* TID */

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!wil->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	txdata->enabled = 1;

	return 0;
 out_free:
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	wil_vring_free(wil, vring, 1);
 out:

	return rc;
}

void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	WARN_ON(!mutex_is_locked(&wil->mutex));

	if (!vring->va)
		return;

	wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);

	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0; /* no Tx can be in progress or start anew */
	spin_unlock_bh(&txdata->lock);
	/* make sure NAPI won't touch this vring */
	if (test_bit(wil_status_napi_en, wil->status))
		napi_synchronize(&wil->napi_tx);

	wil_vring_free(wil, vring, 1);
	memset(txdata, 0, sizeof(*txdata));
}

static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	int i;
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil_find_cid(wil, eth->h_dest);

	if (cid < 0)
		return NULL;

	/* TODO: fix for multiple TID */
	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;
		if (wil->vring2cid_tid[i][0] == cid) {
			struct vring *v = &wil->vring_tx[i];

			wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
				     __func__, eth->h_dest, i);
			if (v->va) {
				return v;
			} else {
				wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
				return NULL;
			}
		}
	}

	return NULL;
}

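/* 802.1x gating, by example: until WMI reports the port open
 * (dot1x_open), a ring only carries ETH_P_PAE frames, so the EAPOL
 * 4-way handshake can proceed while all other traffic is rejected here
 * (and ultimately dropped in wil_start_xmit()).
 */
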
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb);

static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
					   struct sk_buff *skb)
{
	struct vring *v;
	int i;
	u8 cid;

	/* In the STA mode, it is expected to have only 1 VRING
	 * for the AP we connected to.
	 * find 1-st vring eligible for this skb and use it.
	 */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		if (!v->va)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;

		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);

		return v;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;
}

/* Use one of 2 strategies:
 *
 * 1. New (real broadcast):
 *	use dedicated broadcast vring
 * 2. Old (pseudo-DMS):
 *	Find 1-st vring and return it;
 *	duplicate skb and send it to other active vrings;
 *	in all cases override dest address to unicast peer's address
 * Use old strategy when new is not supported yet:
 *  - for PBSS
 */
static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v;
	int i = wil->bcast_vring;

	if (i < 0)
		return NULL;
	v = &wil->vring_tx[i];
	if (!v->va)
		return NULL;
	if (!wil->vring_tx_data[i].dot1x_open &&
	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
		return NULL;

	return v;
}

static void wil_set_da_for_vring(struct wil6210_priv *wil,
				 struct sk_buff *skb, int vring_index)
{
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil->vring2cid_tid[vring_index][0];

	ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
}

static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v, *v2;
	struct sk_buff *skb2;
	int i;
	u8 cid;
	struct ethhdr *eth = (void *)skb->data;
	char *src = eth->h_source;

	/* find 1-st vring eligible for data */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		if (!v->va)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		/* don't Tx back to source when re-routing Rx->Tx at the AP */
		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		goto found;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);

	/* find other active vrings and duplicate skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->vring_tx[i];
		if (!v2->va)
			continue;
		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_vring(wil, v2, skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}

static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;

	if (wdev->iftype != NL80211_IFTYPE_AP)
		return wil_find_tx_bcast_2(wil, skb);

	return wil_find_tx_bcast_1(wil, skb);
}

static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
			   int vring_index)
{
	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
	d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}

/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
					  struct sk_buff *skb,
					  int tso_desc_type, bool is_ipv4,
					  int tcp_hdr_len, int skb_net_hdr_len)
{
	d->dma.b11 = ETH_HLEN; /* MAC header length */
	d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;

	d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
	/* L4 header len: TCP header length */
	d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);

	/* Setup TSO: bit and desc type */
	d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
		(tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
	d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);

	d->dma.ip_length = skb_net_hdr_len;
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}

/**
 * Sets the descriptor @d up for csum. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * Returns 0 for TCP/UDP over IPv4/IPv6, -EINVAL otherwise.
 *
 * It is very similar to previous wil_tx_desc_offload_setup_tso. This
 * is "if unrolling" to optimize the critical path.
 */
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
				     struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
		(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
		(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}

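/* Worked example (hypothetical TCPv4 skb, 20-byte IP and TCP headers):
 * b11 = 14 | L3T_IPV4 bit, ip_length = 20, and d0 gets L4_TYPE = 2,
 * L4_LENGTH = 20 plus the TCP_UDP_CHECKSUM_EN and PSEUDO_HEADER_CALC_EN
 * bits - everything HW needs to fill in the TCP checksum itself.
 */
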
static inline void wil_tx_last_desc(struct vring_tx_desc *d)
{
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
}

static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
{
	d->dma.d0 |= wil_tso_type_lst <<
		     DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
}

static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
			      struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);

	/* point to descriptors in shared memory */
	volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
				      *_first_desc = NULL;

	/* pointers to shadow descriptors */
	struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
			     *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
			     *first_desc = &first_desc_mem;

	/* pointer to shadow descriptors' context */
	struct wil_ctx *hdr_ctx, *first_ctx = NULL;

	int descs_used = 0; /* total number of used descriptors */
	int sg_desc_cnt = 0; /* number of descriptors for current mss*/

	u32 swhead = vring->swhead;
	int used, avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 1;
	int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
	int f, len, hdrlen, headlen;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	const skb_frag_t *frag = NULL;
	int rem_data = mss;
	int lenmss;
	int hdr_compensation_need = true;
	int desc_tso_type = wil_tso_type_first;
	bool is_ipv4;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int gso_type;
	int rc = -EINVAL;

	wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
		     __func__, skb->len, vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	/* A typical page 4K is 3-4 payloads, we assume each fragment
	 * is a full payload, that's how min_desc_required has been
	 * calculated. In real we might need more or less descriptors,
	 * this is the initial check only.
	 */
	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, min_desc_required);
		return -ENOMEM;
	}

	/* Header Length = MAC header len + IP header len + TCP header len*/
	hdrlen = ETH_HLEN +
		(int)skb_network_header_len(skb) +
		tcp_hdrlen(skb);

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		/* TCP v4, zero out the IP length and IPv4 checksum fields
		 * as required by the offloading doc
		 */
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		/* TCP v6, zero out the payload length */
		ipv6_hdr(skb)->payload_len = 0;
		is_ipv4 = false;
		break;
	default:
		/* other than TCPv4 or TCPv6 types are not supported for TSO.
		 * It is also illegal for both to be set simultaneously
		 */
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	_hdr_desc = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb head DMA map error\n");
		goto err_exit;
	}

	wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
				      tcp_hdr_len, skb_net_hdr_len);
	wil_tx_last_desc(hdr_desc);

	vring->ctx[i].mapped_as = wil_mapped_as_single;
	hdr_ctx = &vring->ctx[i];

	descs_used++;
	headlen = skb_headlen(skb) - hdrlen;

	for (f = headlen ? -1 : 0; f < nr_frags; f++) {
		if (f == -1) {
			len = headlen;
			wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
				     len);
		} else {
			frag = &skb_shinfo(skb)->frags[f];
			len = frag->size;
			wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
		}

		while (len) {
			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d\n",
				     len, rem_data, descs_used);

			if (descs_used == avail) {
				wil_err(wil, "TSO: ring overflow\n");
				goto dma_error;
			}

			lenmss = min_t(int, rem_data, len);
			i = (swhead + descs_used) % vring->size;
			wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);

			if (!headlen) {
				pa = skb_frag_dma_map(dev, frag,
						      frag->size - len, lenmss,
						      DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_page;
			} else {
				pa = dma_map_single(dev,
						    skb->data +
						    skb_headlen(skb) - headlen,
						    lenmss,
						    DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_single;
				headlen -= lenmss;
			}

			if (unlikely(dma_mapping_error(dev, pa)))
				goto dma_error;

			_desc = &vring->va[i].tx;

			if (!_first_desc) {
				_first_desc = _desc;
				first_ctx = &vring->ctx[i];
				d = first_desc;
			} else {
				d = &desc_mem;
			}

			wil_tx_desc_map(d, pa, lenmss, vring_index);
			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
						      is_ipv4, tcp_hdr_len,
						      skb_net_hdr_len);

			/* use tso_type_first only once */
			desc_tso_type = wil_tso_type_mid;

			descs_used++;  /* desc used so far */
			sg_desc_cnt++; /* desc used for this segment */
			len -= lenmss;
			rem_data -= lenmss;

			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
				     len, rem_data, descs_used, sg_desc_cnt);

			/* Close the segment if reached mss size or last frag*/
			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
				if (hdr_compensation_need) {
					/* first segment includes hdr desc for
					 * release
					 */
					hdr_ctx->nr_frags = sg_desc_cnt;
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt +
								 1);
					hdr_compensation_need = false;
				} else {
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt);
				}
				first_ctx->nr_frags = sg_desc_cnt - 1;

				wil_tx_last_desc(d);

				/* first descriptor may also be the last
				 * for this mss - make sure not to copy
				 * it twice
				 */
				if (first_desc != d)
					*_first_desc = *first_desc;

				/* last descriptor will be copied at the end
				 * of this TSO processing
				 */
				if (f < nr_frags - 1 || len > 0)
					*_desc = *d;

				rem_data = mss;
				_first_desc = NULL;
				sg_desc_cnt = 0;
			} else if (first_desc != d) /* update mid descriptor */
					*_desc = *d;
		}
	}

	/* first descriptor may also be the last.
	 * in this case d pointer is invalid
	 */
	if (_first_desc == _desc)
		d = first_desc;

	/* Last data descriptor */
	wil_set_tx_desc_last_tso(d);
	*_desc = *d;

	/* Fill the total number of descriptors in first desc (hdr)*/
	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
	*_hdr_desc = *hdr_desc;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + descs_used);
	}

	/* advance swhead */
	wil_vring_advance_head(vring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);
	return 0;

dma_error:
	wil_err(wil, "TSO: DMA map page error\n");
	while (descs_used > 0) {
		struct wil_ctx *ctx;

		i = (swhead + descs_used) % vring->size;
		d = (struct vring_tx_desc *)&vring->va[i].tx;
		_desc = &vring->va[i].tx;

		_desc->dma.status = TX_DMA_STATUS_DU;
		ctx = &vring->ctx[i];
		wil_txdesc_unmap(dev, d, ctx);
		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
err_exit:
	return rc;
}

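/* TSO walk-through (hypothetical sizes): mss = 1460, linear part holds
 * only the headers, one 4096-byte frag. After the hdr descriptor, data
 * descriptors carry 1460 + 1460 + 1176 bytes; each time rem_data reaches
 * 0 (or the last frag ends) the segment is closed via wil_tx_last_desc()
 * and the segment's first descriptor records its descriptor count, so
 * wil_tx_complete() can later release whole segments at once.
 */
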
static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			  struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (vring_index == wil->bcast_vring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
		     __func__, skb->len, vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, len, vring_index);
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}

	vring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags + 1);

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % vring->size;
		_d = &vring->va[i].tx;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa)))
			goto dma_error;
		vring->ctx[i].mapped_as = wil_mapped_as_page;
		wil_tx_desc_map(d, pa, len, vring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_setup(d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + nr_frags + 1);
	}

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
		     vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);

	return 0;

dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &vring->va[i].tx;
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil_txdesc_unmap(dev, d, ctx);

		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}

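/* Descriptor accounting: a non-TSO skb occupies exactly 1 + nr_frags
 * descriptors (linear head plus one per page fragment); the same
 * quantity drives the avail check, wil_tx_desc_set_nr_frags() and the
 * dma_error unwind in __wil_tx_vring above.
 */
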
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	int rc;

	spin_lock(&txdata->lock);

	rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
	     (wil, vring, skb);

	spin_unlock(&txdata->lock);

	return rc;
}

netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct ethhdr *eth = (void *)skb->data;
	bool bcast = is_multicast_ether_addr(eth->h_dest);
	struct vring *vring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "%s()\n", __func__);
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
		wil_err(wil, "FW not connected\n");
		goto drop;
	}
	if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
		/* in STA mode (ESS), all to same VRING */
		vring = wil_find_tx_vring_sta(wil, skb);
	} else { /* direct communication, find matching VRING */
		vring = bcast ? wil_find_tx_bcast(wil, skb) :
				wil_find_tx_ucast(wil, skb);
	}
	if (unlikely(!vring)) {
		wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	/* do we still have enough room in the vring? */
	if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
		netif_tx_stop_all_queues(wil_to_ndev(wil));
		wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
	}

	switch (rc) {
	case 0:
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
 drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}

static inline bool wil_need_txstat(struct sk_buff *skb)
{
	struct ethhdr *eth = (void *)skb->data;

	return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
	       (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}

static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
	if (unlikely(wil_need_txstat(skb)))
		skb_complete_wifi_ack(skb, acked);
	else
		acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
}

/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int done = 0;
	int cid = wil->vring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);

	used_before_complete = wil_vring_used_tx(vring);

	if (cid < WIL6210_MAX_CID)
		stats = &wil->sta[cid].stats;

	while (!wil_vring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/**
		 * For the fragmented skb, HW will set DU bit only for the
		 * last fragment. look for it.
		 * In TSO the first DU will include hdr desc
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil_txdesc_unmap(dev, d, ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* There is no need to touch HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_vring_next_tail(vring);
			done++;
		}
	}

	/* performance monitoring */
	used_new = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring)) {
		wil_dbg_txrx(wil, "netif_tx_wake : ring not full\n");
		netif_tx_wake_all_queues(wil_to_ndev(wil));
	}

	return done;
}
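
/* Completion example: an skb that consumed 3 descriptors gets
 * TX_DMA_STATUS_DU set by HW only on its last one; the outer loop above
 * therefore peeks at swtail + ctx->nr_frags and, once that DU bit is
 * seen, the inner loop reclaims all 3 slots and consumes the skb.
 */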