/* Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "wil6210.h"
#include "txrx_edma.h"
#include "txrx.h"
#include "trace.h"

#define WIL_EDMA_MAX_DATA_OFFSET (2)
/* RX buffer size must be aligned to 4 bytes */
#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)

static void wil_tx_desc_unmap_edma(struct device *dev,
				   union wil_tx_desc *desc,
				   struct wil_ctx *ctx)
{
	struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
	dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}
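
/* ctx->mapped_as records at map time whether the buffer was mapped with
 * dma_map_single() (skb head/linear data) or skb_frag_dma_map() (page
 * fragments, undone here with dma_unmap_page()); see wil_tx_tso_gen_desc()
 * where it is set.
 */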

static int wil_find_free_sring(struct wil6210_priv *wil)
{
	int i;

	for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
		if (!wil->srings[i].va)
			return i;
	}

	return -EINVAL;
}

static void wil_sring_free(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;

	if (!sring || !sring->va)
		return;

	sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
		     sz, sring->va, &sring->pa);

	dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
	sring->pa = 0;
	sring->va = NULL;
}

static int wil_sring_alloc(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);

	if (sz == 0) {
		wil_err(wil, "Cannot allocate a zero size status ring\n");
		return -EINVAL;
	}

	sring->swhead = 0;

	/* Status messages are allocated and initialized to 0. This is necessary
	 * since DR bit should be initialized to 0.
	 */
	sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
	if (!sring->va)
		return -ENOMEM;

	wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
		     &sring->pa);

	return 0;
}

static int wil_tx_init_edma(struct wil6210_priv *wil)
{
	int ring_id = wil_find_free_sring(wil);
	struct wil_status_ring *sring;
	int rc;
	u16 status_ring_size;

	if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->tx_status_ring_order;

	wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
		     status_ring_size, ring_id);

	if (ring_id < 0)
		return ring_id;

	/* Allocate Tx status ring. Tx descriptor rings will be
	 * allocated on WMI connect event
	 */
	sring = &wil->srings[ring_id];

	sring->is_rx = false;
	sring->size = status_ring_size;
	sring->elem_size = sizeof(struct wil_ring_tx_status);
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	rc = wil_wmi_tx_sring_cfg(wil, ring_id);
	if (rc)
		goto out_free;

	sring->desc_rdy_pol = 1;
	wil->tx_sring_idx = ring_id;

	return 0;

out_free:
	wil_sring_free(wil, sring);
	return rc;
}

/**
 * Allocate one skb for Rx descriptor RING
 */
static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
				   struct wil_ring *ring, u32 i)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = wil->rx_buf_len;
	dma_addr_t pa;
	u16 buff_id;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	struct wil_rx_buff *rx_buff;
	struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
	struct sk_buff *skb;
	struct wil_rx_enhanced_desc dd, *d = &dd;
	struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
		&ring->va[i].rx.enhanced;

	if (unlikely(list_empty(free))) {
		wil->rx_buff_mgmt.free_list_empty_cnt++;
		return -EAGAIN;
	}

	skb = dev_alloc_skb(sz);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, sz);

	/**
	 * Make sure that the network stack calculates checksum for packets
	 * which failed the HW checksum calculation
	 */
	skb->ip_summed = CHECKSUM_NONE;

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	/* Get the buffer ID - the index of the rx buffer in the buff_arr */
	rx_buff = list_first_entry(free, struct wil_rx_buff, list);
	buff_id = rx_buff->id;

	/* Move a buffer from the free list to the active list */
	list_move(&rx_buff->list, active);

	buff_arr[buff_id].skb = skb;

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
	d->dma.length = cpu_to_le16(sz);
	d->mac.buff_id = cpu_to_le16(buff_id);
	*_d = *d;

	/* Save the physical address in skb->cb for later use in dma_unmap */
	memcpy(skb->cb, &pa, sizeof(pa));

	return 0;
}
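
/* Note the descriptor write pattern used throughout this file: the
 * descriptor is composed in a stack copy (dd) and then published to the
 * DMA-coherent ring entry with a single struct assignment (*_d = *d),
 * rather than being built field by field in uncached memory.
 */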

void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg)
{
	memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)),
	       sring->elem_size);
}

static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
{
	sring->swhead = (sring->swhead + 1) % sring->size;
	if (sring->swhead == 0)
		sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
}
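
/* Worked example of the polarity scheme: with a status ring of size 3 and
 * desc_rdy_pol initialized to 1, swhead advances 0 -> 1 -> 2 -> 0, and on
 * each wrap back to entry 0 the expected polarity flips (1 -> 0 -> 1 ...).
 * The HW is expected to write the DR bit with alternating polarity on
 * successive passes over the ring, so a stale entry left over from the
 * previous pass never matches desc_rdy_pol and cannot be mistaken for a
 * new status message (the ring starts zeroed, see wil_sring_alloc()).
 */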

static int wil_rx_refill_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	u32 next_head;
	int rc = 0;
	ring->swtail = *ring->edma_rx_swtail.va;

	for (; next_head = wil_ring_next_head(ring),
	     (next_head != ring->swtail);
	     ring->swhead = next_head) {
		rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
		if (unlikely(rc)) {
			if (rc == -EAGAIN)
				wil_dbg_txrx(wil, "No free buffer ID found\n");
			else
				wil_err_ratelimited(wil,
						    "Error %d in refill desc[%d]\n",
						    rc, ring->swhead);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, ring->hwtail, ring->swhead);

	return rc;
}
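
/* The Rx descriptor ring is a split producer/consumer pair: the driver
 * produces buffers at swhead and publishes the new head to the HW via the
 * hwtail register, while the HW reports how far it has consumed through
 * the DMA-coherent edma_rx_swtail.va word read back above. The refill
 * loop stops when the next head would reach swtail, so one entry always
 * stays unused to distinguish a full ring from an empty one.
 */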

static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
					      struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	struct list_head *active = &wil->rx_buff_mgmt.active;
	dma_addr_t pa;

	while (!list_empty(active)) {
		struct wil_rx_buff *rx_buff =
			list_first_entry(active, struct wil_rx_buff, list);
		struct sk_buff *skb = rx_buff->skb;

		if (unlikely(!skb)) {
			wil_err(wil, "No Rx skb at buff_id %d\n", rx_buff->id);
		} else {
			rx_buff->skb = NULL;
			memcpy(&pa, skb->cb, sizeof(pa));
			dma_unmap_single(dev, pa, wil->rx_buf_len,
					 DMA_FROM_DEVICE);
			kfree_skb(skb);
		}

		/* Move the buffer from the active to the free list */
		list_move(&rx_buff->list, &wil->rx_buff_mgmt.free);
	}
}

static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;

	if (!wil->rx_buff_mgmt.buff_arr)
		return;

	/* Move all the buffers to the free list in case active list is
	 * not empty in order to release all SKBs before deleting the array
	 */
	wil_move_all_rx_buff_to_free_list(wil, ring);

	kfree(wil->rx_buff_mgmt.buff_arr);
	wil->rx_buff_mgmt.buff_arr = NULL;
}

static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
				size_t size)
{
	struct wil_rx_buff *buff_arr;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	int i;

	wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
					     GFP_KERNEL);
	if (!wil->rx_buff_mgmt.buff_arr)
		return -ENOMEM;

	/* Set list heads */
	INIT_LIST_HEAD(active);
	INIT_LIST_HEAD(free);

	/* Linkify the list */
	buff_arr = wil->rx_buff_mgmt.buff_arr;
	for (i = 0; i < size; i++) {
		list_add(&buff_arr[i].list, free);
		buff_arr[i].id = i;
	}

	wil->rx_buff_mgmt.size = size;

	return 0;
}

static int wil_init_rx_sring(struct wil6210_priv *wil,
			     u16 status_ring_size,
			     size_t elem_size,
			     u16 ring_id)
{
	struct wil_status_ring *sring = &wil->srings[ring_id];
	int rc;

	wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n",
		     status_ring_size, ring_id);

	memset(&sring->rx_data, 0, sizeof(sring->rx_data));

	sring->is_rx = true;
	sring->size = status_ring_size;
	sring->elem_size = elem_size;
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_sring_add(wil, ring_id);
	if (rc)
		goto out_free;

	sring->desc_rdy_pol = 1;

	return 0;

out_free:
	wil_sring_free(wil, sring);
	return rc;
}

static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
				    struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = ring->size * sizeof(ring->va[0]);

	wil_dbg_misc(wil, "alloc_desc_ring:\n");

	BUILD_BUG_ON(sizeof(ring->va[0]) != 32);

	ring->swhead = 0;
	ring->swtail = 0;
	ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
	if (!ring->ctx)
		goto err;

	ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
	if (!ring->va)
		goto err_free_ctx;

	if (ring->is_rx) {
		sz = sizeof(*ring->edma_rx_swtail.va);
		ring->edma_rx_swtail.va =
			dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
					    GFP_KERNEL);
		if (!ring->edma_rx_swtail.va)
			goto err_free_va;
	}

	wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
		     ring->is_rx ? "RX" : "TX",
		     ring->size, ring->va, &ring->pa, ring->ctx);

	return 0;

err_free_va:
	dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
			  (void *)ring->va, ring->pa);
	ring->va = NULL;
err_free_ctx:
	kfree(ring->ctx);
	ring->ctx = NULL;
err:
	return -ENOMEM;
}

static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;
	int ring_index = 0;

	if (!ring->va)
		return;

	sz = ring->size * sizeof(ring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (ring->is_rx) {
		wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
			     ring->size, ring->va,
			     &ring->pa, ring->ctx);

		wil_move_all_rx_buff_to_free_list(wil, ring);
		goto out;
	}

	/* TX ring */
	ring_index = ring - wil->ring_tx;

	wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
		     ring_index, ring->size, ring->va,
		     &ring->pa, ring->ctx);

	while (!wil_ring_is_empty(ring)) {
		struct wil_ctx *ctx;
		struct wil_tx_enhanced_desc dd, *d = &dd;
		struct wil_tx_enhanced_desc *_d =
			(struct wil_tx_enhanced_desc *)
			&ring->va[ring->swtail].tx.enhanced;

		ctx = &ring->ctx[ring->swtail];
		if (!ctx) {
			wil_dbg_txrx(wil,
				     "ctx(%d) was already completed\n",
				     ring->swtail);
			ring->swtail = wil_ring_next_tail(ring);
			continue;
		}
		*d = *_d;
		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);
		ring->swtail = wil_ring_next_tail(ring);
	}

out:
	dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
	kfree(ring->ctx);
	ring->pa = 0;
	ring->va = NULL;
	ring->ctx = NULL;
}

static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
				 int status_ring_id)
{
	struct wil_ring *ring = &wil->ring_rx;
	int rc;

	wil_dbg_misc(wil, "init RX desc ring\n");

	ring->size = desc_ring_size;
	ring->is_rx = true;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
	if (rc)
		goto out_free;

	return 0;

out_free:
	wil_ring_free_edma(wil, ring);
	return rc;
}

static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
					struct sk_buff *skb, int *tid,
					int *cid, int *mid, u16 *seq,
					int *mcast, int *retry)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*tid = wil_rx_status_get_tid(s);
	*cid = wil_rx_status_get_cid(s);
	*mid = wil_rx_status_get_mid(s);
	*seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
	*mcast = wil_rx_status_get_mcast(s);
	*retry = wil_rx_status_get_retry(s);
}

static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
					 int *security)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*cid = wil_rx_status_get_cid(s);
	*security = wil_rx_status_get_security(s);
}

static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
				    struct sk_buff *skb)
{
	struct wil_rx_status_extended *st;
	int cid, tid, key_id, mc;
	struct wil_sta_info *s;
	struct wil_tid_crypto_rx *c;
	struct wil_tid_crypto_rx_single *cc;
	const u8 *pn;

	/* In HW reorder, HW is responsible for crypto check */
	if (wil->use_rx_hw_reordering)
		return 0;

	st = wil_skb_rxstatus(skb);

	cid = wil_rx_status_get_cid(st);
	tid = wil_rx_status_get_tid(st);
	key_id = wil_rx_status_get_key_id(st);
	mc = wil_rx_status_get_mcast(st);
	s = &wil->sta[cid];
	c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
	cc = &c->key_id[key_id];
	pn = (u8 *)&st->ext.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}
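
/* A sketch of the replay check above, assuming the 6-byte GCMP PN is
 * stored least-significant-byte first starting at st->ext.pn_15_0:
 * reverse_memcmp() compares the PNs from the most significant byte down,
 * so a result <= 0 means the received PN is not strictly greater than the
 * last accepted PN (cc->pn) and the frame is rejected as a replay.
 */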

static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring;
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u8 dr_bit;
	int i;

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (!sring->va)
			continue;

		wil_get_next_rx_status_msg(sring, msg);
		dr_bit = wil_rx_status_get_desc_rdy_bit(msg);

		/* Check if there are unhandled RX status messages */
		if (dr_bit == sring->desc_rdy_pol)
			return false;
	}

	return true;
}

static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
{
	/* RX buffer size must be aligned to 4 bytes */
	wil->rx_buf_len = rx_large_buf ?
		WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
}

static int wil_rx_init_edma(struct wil6210_priv *wil, uint desc_ring_order)
{
	u16 status_ring_size, desc_ring_size = 1 << desc_ring_order;
	struct wil_ring *ring = &wil->ring_rx;
	int rc;
	size_t elem_size = wil->use_compressed_rx_status ?
		sizeof(struct wil_rx_status_compressed) :
		sizeof(struct wil_rx_status_extended);
	int i;

	/* In SW reorder one must use extended status messages */
	if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
		wil_err(wil,
			"compressed RX status cannot be used with SW reorder\n");
		return -EINVAL;
	}
	if (wil->rx_status_ring_order <= desc_ring_order)
		/* make sure sring is larger than desc ring */
		wil->rx_status_ring_order = desc_ring_order + 1;
	if (wil->rx_buff_id_count <= desc_ring_size)
		/* make sure we will not run out of buff_ids */
		wil->rx_buff_id_count = desc_ring_size + 512;
	if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->rx_status_ring_order;

	wil_dbg_misc(wil,
		     "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
		     desc_ring_size, status_ring_size, elem_size);

	wil_rx_buf_len_init_edma(wil);

	/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
	if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
		wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;

	wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
		     wil->num_rx_status_rings);

	rc = wil_wmi_cfg_def_rx_offload(wil, wil->rx_buf_len);
	if (rc)
		return rc;

	/* Allocate status ring */
	for (i = 0; i < wil->num_rx_status_rings; i++) {
		int sring_id = wil_find_free_sring(wil);

		if (sring_id < 0) {
			rc = -EFAULT;
			goto err_free_status;
		}
		rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
				       sring_id);
		if (rc)
			goto err_free_status;
	}

	/* Allocate descriptor ring */
	rc = wil_init_rx_desc_ring(wil, desc_ring_size,
				   WIL_DEFAULT_RX_STATUS_RING_ID);
	if (rc)
		goto err_free_status;

	if (wil->rx_buff_id_count >= status_ring_size) {
		wil_info(wil,
			 "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
			 wil->rx_buff_id_count, status_ring_size,
			 status_ring_size - 1);
		wil->rx_buff_id_count = status_ring_size - 1;
	}

	/* Allocate Rx buffer array */
	rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
	if (rc)
		goto err_free_desc;

	/* Fill descriptor ring with credits */
	rc = wil_rx_refill_edma(wil);
	if (rc)
		goto err_free_rx_buff_arr;

	return 0;

err_free_rx_buff_arr:
	wil_free_rx_buff_arr(wil);
err_free_desc:
	wil_ring_free_edma(wil, ring);
err_free_status:
	for (i = 0; i < wil->num_rx_status_rings; i++)
		wil_sring_free(wil, &wil->srings[i]);

	return rc;
}

static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
				 int size, int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	lockdep_assert_held(&wil->mutex);

	wil_dbg_misc(wil,
		     "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
		     ring_id, cid, tid, wil->tx_sring_idx);

	wil_tx_data_init(txdata);
	ring->size = size;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = cid;
	wil->ring2cid_tid[ring_id][1] = tid;
	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
	if (rc) {
		wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
		goto out_free;
	}

	if (txdata->dot1x_open && agg_wsize >= 0)
		wil_addba_tx_request(wil, ring_id, agg_wsize);

	return 0;

out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);
	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
	wil->ring2cid_tid[ring_id][1] = 0;

out:
	return rc;
}

static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id,
				   int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);

	wil_err(wil, "ring modify is not supported for EDMA\n");

	return -EOPNOTSUPP;
}

/* This function is used only for RX SW reorder */
static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
			 struct sk_buff *skb, struct wil_net_stats *stats)
{
	u8 ftype;
	u8 fc1;
	int mid;
	int tid;
	u16 seq;
	struct wil6210_vif *vif;

	ftype = wil_rx_status_get_frame_type(wil, msg);
	if (ftype == IEEE80211_FTYPE_DATA)
		return 0;

	fc1 = wil_rx_status_get_fc1(wil, msg);
	mid = wil_rx_status_get_mid(msg);
	tid = wil_rx_status_get_tid(msg);
	seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
	vif = wil->vifs[mid];

	if (unlikely(!vif)) {
		wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
		return -EAGAIN;
	}

	wil_dbg_txrx(wil,
		     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
		     fc1, mid, cid, tid, seq);
	if (stats)
		stats->rx_non_data_frame++;
	if (wil_is_back_req(fc1)) {
		wil_dbg_txrx(wil,
			     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
			     mid, cid, tid, seq);
		wil_rx_bar(wil, vif, cid, tid, seq);
	} else {
		u32 sz = wil->use_compressed_rx_status ?
			sizeof(struct wil_rx_status_compressed) :
			sizeof(struct wil_rx_status_extended);

		/* print again all info. One can enable only this
		 * without overhead for printing every Rx frame
		 */
		wil_dbg_txrx(wil,
			     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)msg, sz, false);
		wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
				  skb->data, skb_headlen(skb), false);
	}

	return -EAGAIN;
}

static int wil_rx_error_check_edma(struct wil6210_priv *wil,
				   struct sk_buff *skb,
				   struct wil_net_stats *stats)
{
	int error;
	int l2_rx_status;
	int l3_rx_status;
	int l4_rx_status;
	void *msg = wil_skb_rxstatus(skb);

	error = wil_rx_status_get_error(msg);
	if (!error) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return 0;
	}

	l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
	if (l2_rx_status != 0) {
		wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
			     l2_rx_status);
		/* Due to HW issue, KEY error will trigger a MIC error */
		if (l2_rx_status == WIL_RX_EDMA_ERROR_MIC) {
			wil_err_ratelimited(wil,
					    "L2 MIC/KEY error, dropping packet\n");
			stats->rx_mic_error++;
		}
		if (l2_rx_status == WIL_RX_EDMA_ERROR_KEY) {
			wil_err_ratelimited(wil,
					    "L2 KEY error, dropping packet\n");
			stats->rx_key_error++;
		}
		if (l2_rx_status == WIL_RX_EDMA_ERROR_REPLAY) {
			wil_err_ratelimited(wil,
					    "L2 REPLAY error, dropping packet\n");
			stats->rx_replay++;
		}
		if (l2_rx_status == WIL_RX_EDMA_ERROR_AMSDU) {
			wil_err_ratelimited(wil,
					    "L2 AMSDU error, dropping packet\n");
			stats->rx_amsdu_error++;
		}
		return -EFAULT;
	}

	l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
	l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
	if (!l3_rx_status && !l4_rx_status)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* If HW reports a bad checksum, let the IP stack re-check it.
	 * For example, the HW doesn't understand a Microsoft IP stack that
	 * mis-calculates the TCP checksum - if it should be 0x0,
	 * it writes 0xffff in violation of RFC 1624
	 */
	else
		stats->rx_csum_err++;

	return 0;
}
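
/* Checksum policy above: CHECKSUM_UNNECESSARY is set only when the HW
 * reports no error at all, or when both the L3 and L4 status fields are
 * clean. Every other path leaves skb->ip_summed as CHECKSUM_NONE (set at
 * skb allocation in wil_ring_alloc_skb_edma()), so the network stack
 * verifies the checksum in SW.
 */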

static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
					      struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u16 buff_id;
	struct sk_buff *skb;
	dma_addr_t pa;
	struct wil_ring_rx_data *rxdata = &sring->rx_data;
	unsigned int sz = wil->rx_buf_len;
	struct wil_net_stats *stats = NULL;
	u16 dmalen;
	int cid;
	bool eop, headstolen;
	int delta;
	u8 dr_bit;
	u8 data_offset;
	struct wil_rx_status_extended *s;
	u16 sring_idx = sring - wil->srings;

	BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));

again:
	wil_get_next_rx_status_msg(sring, msg);
	dr_bit = wil_rx_status_get_desc_rdy_bit(msg);

	/* Completed handling all the ready status messages */
	if (dr_bit != sring->desc_rdy_pol)
		return NULL;

	/* Extract the buffer ID from the status message */
	buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
	if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
		wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
			buff_id, sring->swhead);
		wil_sring_advance_swhead(sring);
		goto again;
	}

	wil_sring_advance_swhead(sring);

	/* Extract the SKB from the rx_buff management array */
	skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
	wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
	if (!skb) {
		wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
		/* Move the buffer from the active list to the free list */
		list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
			  &wil->rx_buff_mgmt.free);
		goto again;
	}

	memcpy(&pa, skb->cb, sizeof(pa));
	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(wil_rx_status_get_length(msg));

	trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
				msg);
	wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
		     buff_id, sring_idx, dmalen);
	wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)msg, wil->use_compressed_rx_status ?
			  sizeof(struct wil_rx_status_compressed) :
			  sizeof(struct wil_rx_status_extended), false);

	/* Move the buffer from the active list to the free list */
	list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
		  &wil->rx_buff_mgmt.free);

	eop = wil_rx_status_get_eop(msg);

	cid = wil_rx_status_get_cid(msg);
	if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) {
		wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
			cid, sring->swhead);
		rxdata->skipping = true;
		goto skipping;
	}
	stats = &wil->sta[cid].stats;

	if (unlikely(skb->len < ETH_HLEN)) {
		wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len);
		stats->rx_short_frame++;
		rxdata->skipping = true;
		goto skipping;
	}

	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		stats->rx_large_frame++;
		rxdata->skipping = true;
	}

skipping:
	/* skipping indicates if a certain SKB should be dropped.
	 * It is set in case there is an error on the current SKB or in case
	 * of RX chaining: as long as we manage to merge the SKBs it will
	 * be false. Once we have a bad SKB or we don't manage to merge SKBs
	 * it will be set to the !EOP value of the current SKB.
	 * This guarantees that all the following SKBs until EOP will also
	 * get dropped.
	 */
	if (unlikely(rxdata->skipping)) {
		kfree_skb(skb);
		if (rxdata->skb) {
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
		}
		rxdata->skipping = !eop;
		goto again;
	}

	skb_trim(skb, dmalen);

	prefetch(skb->data);

	if (!rxdata->skb) {
		rxdata->skb = skb;
	} else {
		if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
					    &delta))) {
			kfree_skb_partial(skb, headstolen);
		} else {
			wil_err(wil, "failed to merge skbs!\n");
			kfree_skb(skb);
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
			rxdata->skipping = !eop;
			goto again;
		}
	}

	if (!eop)
		goto again;

	/* reaching here rxdata->skb always contains a full packet */
	skb = rxdata->skb;
	rxdata->skb = NULL;
	rxdata->skipping = false;

	if (stats) {
		stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
		if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
			stats->rx_per_mcs[stats->last_mcs_rx]++;
	}

	if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
	    wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
		kfree_skb(skb);
		goto again;
	}

	/* Compensate for the HW data alignment according to the status
	 * message
	 */
	data_offset = wil_rx_status_get_data_offset(msg);
	if (data_offset == 0xFF ||
	    data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
		wil_err(wil, "Unexpected data offset %d\n", data_offset);
		kfree_skb(skb);
		goto again;
	}

	skb_pull(skb, data_offset);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	/* Has to be done after dma_unmap_single as skb->cb is also
	 * used for holding the pa
	 */
	s = wil_skb_rxstatus(skb);
	memcpy(s, msg, sring->elem_size);

	return skb;
}
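
/* Rx chaining: a frame larger than one Rx buffer arrives as a sequence of
 * status messages, only the last of which has the EOP bit set. The pieces
 * are merged into rxdata->skb with skb_try_coalesce() and the function
 * keeps looping (goto again) until EOP, returning one skb per complete
 * frame.
 */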

void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev;
	struct wil_ring *ring = &wil->ring_rx;
	struct wil_status_ring *sring;
	struct sk_buff *skb;
	int i;

	if (unlikely(!ring->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "rx_handle\n");

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (unlikely(!sring->va)) {
			wil_err(wil,
				"Rx IRQ while Rx status ring %d not yet initialized\n",
				i);
			continue;
		}

		while ((*quota > 0) &&
		       (NULL != (skb =
			wil_sring_reap_rx_edma(wil, sring)))) {
			(*quota)--;
			if (wil->use_rx_hw_reordering) {
				void *msg = wil_skb_rxstatus(skb);
				int mid = wil_rx_status_get_mid(msg);
				struct wil6210_vif *vif = wil->vifs[mid];

				if (unlikely(!vif)) {
					wil_dbg_txrx(wil,
						     "RX desc invalid mid %d",
						     mid);
					kfree_skb(skb);
					continue;
				}
				ndev = vif_to_ndev(vif);
				wil_netif_rx_any(skb, ndev);
			} else {
				wil_rx_reorder(wil, skb);
			}
		}

		wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
	}

	wil_rx_refill_edma(wil);
}
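
/* The status ring read pointer is published as (swhead - 1) % size:
 * hwtail names the last entry the driver has already consumed, not the
 * next one it will read.
 */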

static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
				dma_addr_t pa,
				u32 len,
				int ring_index)
{
	struct wil_tx_enhanced_desc *d =
		(struct wil_tx_enhanced_desc *)&desc->enhanced;

	memset(d, 0, sizeof(struct wil_tx_enhanced_desc));

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);

	/* 0..6: mac_length; 7: ip_version 0-IP6 1-IP4 */
	d->dma.length = cpu_to_le16((u16)len);
	d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi;
	 * 3 - eth mode
	 */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

static inline void
wil_get_next_tx_status_msg(struct wil_status_ring *sring,
			   struct wil_ring_tx_status *msg)
{
	struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
		(sring->va + (sring->elem_size * sring->swhead));

	*msg = *_msg;
}

/**
 * Clean up transmitted skb's from the Tx descriptor RING.
 * Return number of descriptors cleared.
 */
int wil_tx_sring_handler(struct wil6210_priv *wil,
			 struct wil_status_ring *sring)
{
	struct net_device *ndev;
	struct device *dev = wil_to_dev(wil);
	struct wil_ring *ring = NULL;
	struct wil_ring_tx_data *txdata;
	/* Total number of completed descriptors in all descriptor rings */
	int desc_cnt = 0;
	int cid;
	struct wil_net_stats *stats = NULL;
	struct wil_tx_enhanced_desc *_d;
	unsigned int ring_id;
	unsigned int num_descs;
	int i;
	u8 dr_bit; /* Descriptor Ready bit */
	struct wil_ring_tx_status msg;
	struct wil6210_vif *vif;
	int used_before_complete;
	int used_new;

	wil_get_next_tx_status_msg(sring, &msg);
	dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;

	/* Process completion messages while DR bit has the expected polarity */
	while (dr_bit == sring->desc_rdy_pol) {
		num_descs = msg.num_descriptors;
		if (!num_descs) {
			wil_err(wil, "invalid num_descs 0\n");
			goto again;
		}

		/* Find the corresponding descriptor ring */
		ring_id = msg.ring_id;

		if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
			wil_err(wil, "invalid ring id %d\n", ring_id);
			goto again;
		}
		ring = &wil->ring_tx[ring_id];
		if (unlikely(!ring->va)) {
			wil_err(wil, "Tx irq[%d]: ring not initialized\n",
				ring_id);
			goto again;
		}
		txdata = &wil->ring_tx_data[ring_id];
		if (unlikely(!txdata->enabled)) {
			wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
			goto again;
		}
		vif = wil->vifs[txdata->mid];
		if (unlikely(!vif)) {
			wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
				     txdata->mid, ring_id);
			goto again;
		}

		ndev = vif_to_ndev(vif);

		cid = wil->ring2cid_tid[ring_id][0];
		if (cid < WIL6210_MAX_CID)
			stats = &wil->sta[cid].stats;

		wil_dbg_txrx(wil,
			     "tx_status: completed desc_ring (%d), num_descs (%d)\n",
			     ring_id, num_descs);

		used_before_complete = wil_ring_used_tx(ring);

		for (i = 0 ; i < num_descs; ++i) {
			struct wil_ctx *ctx = &ring->ctx[ring->swtail];
			struct wil_tx_enhanced_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb = ctx->skb;

			_d = (struct wil_tx_enhanced_desc *)
				&ring->va[ring->swtail].tx.enhanced;
			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
				     ring_id, ring->swtail, dmalen,
				     msg.status);
			wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)&msg, sizeof(msg),
					  false);

			wil_tx_desc_unmap_edma(dev,
					       (union wil_tx_desc *)d,
					       ctx);

			if (skb) {
				if (likely(msg.status == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;

						wil_tx_latency_calc(wil, skb,
							&wil->sta[cid]);
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, msg.status == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_ring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();

			ring->swtail = wil_ring_next_tail(ring);

			desc_cnt++;
		}

		/* performance monitoring */
		used_new = wil_ring_used_tx(ring);
		if (wil_val_in_range(wil->ring_idle_trsh,
				     used_new, used_before_complete)) {
			wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
				     ring_id, used_before_complete, used_new);
			txdata->last_idle = get_cycles();
		}

again:
		wil_sring_advance_swhead(sring);

		wil_get_next_tx_status_msg(sring, &msg);
		dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
	}

	/* shall we wake net queues? */
	if (desc_cnt)
		wil_update_net_queues(wil, vif, NULL, false);

	/* Update the HW tail ptr (RD ptr) */
	wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);

	return desc_cnt;
}
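
/* A single Tx status message may complete a whole batch: num_descriptors
 * descriptors are reclaimed from ring->swtail onward, but only the last
 * descriptor of an skb carries the skb pointer in its ctx (see
 * wil_tx_tso_gen_desc()), so intermediate descriptors are unmapped and
 * cleared without any skb accounting.
 */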

/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
					       int tso_desc_type, bool is_ipv4,
					       int tcp_hdr_len,
					       int skb_net_hdr_len,
					       int mss)
{
	/* Number of descriptors */
	d->mac.d[2] |= 1;
	/* Maximum Segment Size */
	d->mac.tso_mss |= cpu_to_le16(mss >> 2);
	/* L4 header len: TCP header length */
	d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
	/* EOP, TSO desc type, Segmentation enable,
	 * Insert IPv4 and TCP / UDP Checksum
	 */
	d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
		      tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
		      BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
	/* Calculate pseudo-header */
	d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
		     BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
	/* IP Header Length */
	d->dma.ip_length |= skb_net_hdr_len;
	/* MAC header length and IP address family */
	d->dma.b11 |= ETH_HLEN |
		      is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
}
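
/* The MSS is programmed as (mss >> 2), apparently in units of 4 bytes as
 * expected by the tso_mss field; e.g. a gso_size of 1460 is written as
 * 365. The caller passes the MSS taken from skb_shinfo(skb)->gso_size
 * (see __wil_tx_ring_tso_edma() below).
 */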

static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
			       int len, uint i, int tso_desc_type,
			       skb_frag_t *frag, struct wil_ring *ring,
			       struct sk_buff *skb, bool is_ipv4,
			       int tcp_hdr_len, int skb_net_hdr_len,
			       int mss, int *descs_used)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
		&ring->va[i].tx.enhanced;
	struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
	int ring_index = ring - wil->ring_tx;
	dma_addr_t pa;

	if (len == 0)
		return 0;

	if (!frag) {
		pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_single;
	} else {
		pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_page;
	}
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb DMA map error\n");
		return -EINVAL;
	}

	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
				  len, ring_index);
	wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
					   tcp_hdr_len,
					   skb_net_hdr_len, mss);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	if (tso_desc_type == wil_tso_type_lst)
		ring->ctx[i].skb = skb_get(skb);

	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	*_desc = *d;
	(*descs_used)++;

	return 0;
}

static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
				  struct wil6210_vif *vif,
				  struct wil_ring *ring,
				  struct sk_buff *skb)
{
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
	int used, avail = wil_ring_avail_tx(ring);
	int f, hdrlen, headlen;
	int gso_type;
	bool is_ipv4;
	u32 swhead = ring->swhead;
	int descs_used = 0; /* total number of used descriptors */
	int rc = -EINVAL;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int mss = skb_shinfo(skb)->gso_size;

	wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
		     ring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    ring_index, min_desc_required);
		return -ENOMEM;
	}

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		is_ipv4 = false;
		break;
	default:
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	/* First descriptor must contain the header only
	 * Header Length = MAC header len + IP header len + TCP header len
	 */
	hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
	wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
		     hdrlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
				 wil_tso_type_hdr, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		return -EINVAL;

	/* Second descriptor contains the head */
	headlen = skb_headlen(skb) - hdrlen;
	wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
				 (swhead + descs_used) % ring->size,
				 (nr_frags != 0) ? wil_tso_type_first :
				 wil_tso_type_lst, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		goto mem_error;

	/* Rest of the descriptors are from the SKB fragments */
	for (f = 0; f < nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
		int len = frag->size;

		wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
			     len, descs_used);

		rc = wil_tx_tso_gen_desc(wil, NULL, len,
					 (swhead + descs_used) % ring->size,
					 (f != nr_frags - 1) ?
					 wil_tso_type_mid : wil_tso_type_lst,
					 frag, ring, skb, is_ipv4,
					 tcp_hdr_len, skb_net_hdr_len,
					 mss, &descs_used);
		if (rc)
			goto mem_error;
	}

	/* performance monitoring */
	used = wil_ring_used_tx(ring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     ring_index, used, used + descs_used);
	}

	/* advance swhead */
	wil_ring_advance_head(ring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	if (wil->tx_latency)
		*(ktime_t *)&skb->cb = ktime_get();
	else
		memset(skb->cb, 0, sizeof(ktime_t));

	wil_w(wil, ring->hwtail, ring->swhead);

	return 0;

mem_error:
	while (descs_used > 0) {
		struct device *dev = wil_to_dev(wil);
		struct wil_ctx *ctx;
		int i = (swhead + descs_used - 1) % ring->size;
		struct wil_tx_enhanced_desc dd, *d = &dd;
		struct wil_tx_enhanced_desc *_desc =
			(struct wil_tx_enhanced_desc *)
			&ring->va[i].tx.enhanced;

		*d = *_desc;
		ctx = &ring->ctx[i];
		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
	return rc;
}
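
/* Descriptor chain produced above for a TSO skb with N page fragments:
 *
 *   desc 0          wil_tso_type_hdr    ETH + IP + TCP headers only
 *   desc 1          wil_tso_type_first  linear data following the headers
 *   desc 2..N       wil_tso_type_mid    page fragments (the last fragment
 *                                       is wil_tso_type_lst instead)
 *
 * The wil_tso_type_lst descriptor holds the skb reference. When the skb
 * has no fragments, desc 1 is already wil_tso_type_lst, which is why
 * min_desc_required is nr_frags + 2.
 */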

static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
				    int size)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	int rc;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
		     ring_id, wil->tx_sring_idx);

	lockdep_assert_held(&wil->mutex);

	wil_tx_data_init(txdata);
	ring->size = size;
	ring->is_rx = false;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
	wil->ring2cid_tid[ring_id][1] = 0; /* TID */
	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
	if (rc)
		goto out_free;

	return 0;

out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);

out:
	return rc;
}

static void wil_tx_fini_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];

	wil_dbg_misc(wil, "free TX sring\n");

	wil_sring_free(wil, sring);
}

static void wil_rx_data_free(struct wil_status_ring *sring)
{
	if (!sring)
		return;

	kfree_skb(sring->rx_data.skb);
	sring->rx_data.skb = NULL;
}

static void wil_rx_fini_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	int i;

	wil_dbg_misc(wil, "rx_fini_edma\n");

	wil_ring_free_edma(wil, ring);

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		wil_rx_data_free(&wil->srings[i]);
		wil_sring_free(wil, &wil->srings[i]);
	}

	wil_free_rx_buff_arr(wil);
}

void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
{
	wil->txrx_ops.configure_interrupt_moderation =
		wil_configure_interrupt_moderation_edma;
	/* TX ops */
	wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
	wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
	wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
	wil->txrx_ops.tx_init = wil_tx_init_edma;
	wil->txrx_ops.tx_fini = wil_tx_fini_edma;
	wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
	wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
	wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
	wil->txrx_ops.tx_ring_modify = wil_tx_ring_modify_edma;
	/* RX ops */
	wil->txrx_ops.rx_init = wil_rx_init_edma;
	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
	wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
	wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
	wil->txrx_ops.rx_error_check = wil_rx_error_check_edma;
	wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
	wil->txrx_ops.rx_fini = wil_rx_fini_edma;
}