// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/log2.h>
#include <linux/bitfield.h>
19 /* when under memory pressure rx ring refill may fail and needs a retry */
20 #define HTT_RX_RING_REFILL_RETRY_MS 50
22 #define HTT_RX_RING_REFILL_RESCHED_MS 5
24 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
26 static struct sk_buff *
27 ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
29 struct ath10k_skb_rxcb *rxcb;
31 hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
32 if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	return NULL;
}
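/* A minimal sketch (not part of the driver) of how the paddr -> skb
 * hashtable above is paired with the fill path further down: buffers are
 * inserted with their DMA address as the key when the ring is replenished,
 * and looked up here on completion:
 *
 *	rxcb = ATH10K_SKB_RXCB(skb);
 *	rxcb->paddr = paddr;
 *	hash_add(htt->rx_ring.skb_table, &rxcb->hlist, paddr);
 *
 * hash_for_each_possible() then walks only the bucket that paddr hashes
 * to, with collisions resolved by the rxcb->paddr == paddr comparison.
 */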
39 static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
42 struct ath10k_skb_rxcb *rxcb;
46 if (htt->rx_ring.in_ord_rx) {
47 hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
48 skb = ATH10K_RXCB_SKB(rxcb);
49 dma_unmap_single(htt->ar->dev, rxcb->paddr,
50 skb->len + skb_tailroom(skb),
52 hash_del(&rxcb->hlist);
53 dev_kfree_skb_any(skb);
56 for (i = 0; i < htt->rx_ring.size; i++) {
57 skb = htt->rx_ring.netbufs_ring[i];
61 rxcb = ATH10K_SKB_RXCB(skb);
62 dma_unmap_single(htt->ar->dev, rxcb->paddr,
63 skb->len + skb_tailroom(skb),
65 dev_kfree_skb_any(skb);
69 htt->rx_ring.fill_cnt = 0;
70 hash_init(htt->rx_ring.skb_table);
71 memset(htt->rx_ring.netbufs_ring, 0,
72 htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
75 static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
77 return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
80 static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
82 return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
85 static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
88 htt->rx_ring.paddrs_ring_32 = vaddr;
91 static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
94 htt->rx_ring.paddrs_ring_64 = vaddr;
97 static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
98 dma_addr_t paddr, int idx)
100 htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
103 static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
104 dma_addr_t paddr, int idx)
106 htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
109 static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
111 htt->rx_ring.paddrs_ring_32[idx] = 0;
114 static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
116 htt->rx_ring.paddrs_ring_64[idx] = 0;
119 static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
121 return (void *)htt->rx_ring.paddrs_ring_32;
124 static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
126 return (void *)htt->rx_ring.paddrs_ring_64;
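/* Illustrative only: these 32/64-bit ring accessors are meant to be
 * selected once per target rather than branched on per packet. A minimal
 * sketch of that wiring (struct and field names here are hypothetical):
 *
 *	struct ath10k_htt_rx_ring_ops {
 *		size_t (*get_size)(struct ath10k_htt *htt);
 *		void (*config)(struct ath10k_htt *htt, void *vaddr);
 *		void (*set)(struct ath10k_htt *htt, dma_addr_t paddr, int idx);
 *		void (*reset)(struct ath10k_htt *htt, int idx);
 *		void *(*get_vaddr)(struct ath10k_htt *htt);
 *	};
 *
 *	if (ar->hw_params.target_64bit)
 *		htt->rx_ring_ops = &rx_ring_ops_64;
 *	else
 *		htt->rx_ring_ops = &rx_ring_ops_32;
 */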
129 static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
131 struct htt_rx_desc *rx_desc;
132 struct ath10k_skb_rxcb *rxcb;
137 /* The Full Rx Reorder firmware has no way of telling the host
138 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
139 * To keep things simple make sure ring is always half empty. This
140 * guarantees there'll be no replenishment overruns possible.
142 BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
144 idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
146 if (idx < 0 || idx >= htt->rx_ring.size) {
147 ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
148 idx &= htt->rx_ring.size_mask;
154 skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
160 if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
162 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
165 /* Clear rx_desc attention word before posting to Rx ring */
166 rx_desc = (struct htt_rx_desc *)skb->data;
167 rx_desc->attention.flags = __cpu_to_le32(0);
169 paddr = dma_map_single(htt->ar->dev, skb->data,
170 skb->len + skb_tailroom(skb),
173 if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
174 dev_kfree_skb_any(skb);
179 rxcb = ATH10K_SKB_RXCB(skb);
181 htt->rx_ring.netbufs_ring[idx] = skb;
182 ath10k_htt_set_paddrs_ring(htt, paddr, idx);
183 htt->rx_ring.fill_cnt++;
185 if (htt->rx_ring.in_ord_rx) {
186 hash_add(htt->rx_ring.skb_table,
187 &ATH10K_SKB_RXCB(skb)->hlist,
193 idx &= htt->rx_ring.size_mask;
	/* Make sure the rx buffer is updated before the available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
206 static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
208 lockdep_assert_held(&htt->rx_ring.lock);
209 return __ath10k_htt_rx_ring_fill_n(htt, num);
212 static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
214 int ret, num_deficit, num_to_fill;
	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up a significant amount of CPU cycles and
	 * starve other tasks, e.g. TX on an ethernet device while acting as
	 * a bridge with the ath10k wlan interface. This ended up with very
	 * poor performance once the host system's CPU was overwhelmed with
	 * RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets
	 * are processed in FIFO order. This means actual RX processing can
	 * starve out refilling. If there aren't enough buffers in the RX
	 * ring the FW will not report RX until the ring is refilled with
	 * enough buffers. This automatically balances load with respect to
	 * CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
232 spin_lock_bh(&htt->rx_ring.lock);
233 num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
234 num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
235 num_deficit -= num_to_fill;
236 ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
237 if (ret == -ENOMEM) {
239 * Failed to fill it to the desired level -
240 * we'll start a timer and try again next time.
241 * As long as enough buffers are left in the ring for
242 * another A-MPDU rx, no special recovery is needed.
244 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
245 msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
246 } else if (num_deficit > 0) {
247 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
248 msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
250 spin_unlock_bh(&htt->rx_ring.lock);
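/* A hedged usage note: the replenish above is intended to be invoked from
 * the rx completion path after a batch of buffers has been consumed, e.g.
 * (sketch only, pop_completed_rx()/process_rx() are hypothetical):
 *
 *	while ((skb = pop_completed_rx(htt)))
 *		process_rx(skb);
 *	ath10k_htt_rx_msdu_buff_replenish(htt);
 *
 * Because tasklets run in FIFO order, heavy rx processing delays the next
 * refill, which in turn throttles how fast the firmware can post new rx -
 * the self-balancing behavior described in the comment above.
 */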
253 static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
255 struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);
257 ath10k_htt_rx_msdu_buff_replenish(htt);
260 int ath10k_htt_rx_ring_refill(struct ath10k *ar)
262 struct ath10k_htt *htt = &ar->htt;
265 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
268 spin_lock_bh(&htt->rx_ring.lock);
269 ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
270 htt->rx_ring.fill_cnt));
273 ath10k_htt_rx_ring_free(htt);
275 spin_unlock_bh(&htt->rx_ring.lock);
280 void ath10k_htt_rx_free(struct ath10k_htt *htt)
282 if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
285 del_timer_sync(&htt->rx_ring.refill_retry_timer);
287 skb_queue_purge(&htt->rx_msdus_q);
288 skb_queue_purge(&htt->rx_in_ord_compl_q);
289 skb_queue_purge(&htt->tx_fetch_ind_q);
291 spin_lock_bh(&htt->rx_ring.lock);
292 ath10k_htt_rx_ring_free(htt);
293 spin_unlock_bh(&htt->rx_ring.lock);
295 dma_free_coherent(htt->ar->dev,
296 ath10k_htt_get_rx_ring_size(htt),
297 ath10k_htt_get_vaddr_ring(htt),
298 htt->rx_ring.base_paddr);
300 dma_free_coherent(htt->ar->dev,
301 sizeof(*htt->rx_ring.alloc_idx.vaddr),
302 htt->rx_ring.alloc_idx.vaddr,
303 htt->rx_ring.alloc_idx.paddr);
305 kfree(htt->rx_ring.netbufs_ring);
308 static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
310 struct ath10k *ar = htt->ar;
312 struct sk_buff *msdu;
314 lockdep_assert_held(&htt->rx_ring.lock);
316 if (htt->rx_ring.fill_cnt == 0) {
317 ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
321 idx = htt->rx_ring.sw_rd_idx.msdu_payld;
322 msdu = htt->rx_ring.netbufs_ring[idx];
323 htt->rx_ring.netbufs_ring[idx] = NULL;
324 ath10k_htt_reset_paddrs_ring(htt, idx);
327 idx &= htt->rx_ring.size_mask;
328 htt->rx_ring.sw_rd_idx.msdu_payld = idx;
329 htt->rx_ring.fill_cnt--;
331 dma_unmap_single(htt->ar->dev,
332 ATH10K_SKB_RXCB(msdu)->paddr,
333 msdu->len + skb_tailroom(msdu),
335 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
336 msdu->data, msdu->len + skb_tailroom(msdu));
/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
342 static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
343 struct sk_buff_head *amsdu)
345 struct ath10k *ar = htt->ar;
346 int msdu_len, msdu_chaining = 0;
347 struct sk_buff *msdu;
348 struct htt_rx_desc *rx_desc;
350 lockdep_assert_held(&htt->rx_ring.lock);
353 int last_msdu, msdu_len_invalid, msdu_chained;
355 msdu = ath10k_htt_rx_netbuf_pop(htt);
357 __skb_queue_purge(amsdu);
361 __skb_queue_tail(amsdu, msdu);
363 rx_desc = (struct htt_rx_desc *)msdu->data;
365 /* FIXME: we must report msdu payload since this is what caller
368 skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
369 skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		/* Sanity check - confirm the HW is finished filling in the
		 * rx data. If the HW and SW are working correctly, then
		 * it's guaranteed that the HW's MAC DMA is done before this
		 * point in the SW. To prevent the case that we handle a
		 * stale Rx descriptor, just assert for now until we have a
		 * way to recover.
		 */
379 if (!(__le32_to_cpu(rx_desc->attention.flags)
380 & RX_ATTENTION_FLAGS_MSDU_DONE)) {
381 __skb_queue_purge(amsdu);
385 msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
386 & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
387 RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
388 msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
389 RX_MSDU_START_INFO0_MSDU_LENGTH);
390 msdu_chained = rx_desc->frag_info.ring2_more_count;
392 if (msdu_len_invalid)
396 skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
397 msdu_len -= msdu->len;
399 /* Note: Chained buffers do not contain rx descriptor */
400 while (msdu_chained--) {
401 msdu = ath10k_htt_rx_netbuf_pop(htt);
403 __skb_queue_purge(amsdu);
407 __skb_queue_tail(amsdu, msdu);
409 skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
410 msdu_len -= msdu->len;
414 last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
415 RX_MSDU_END_INFO0_LAST_MSDU;
417 trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
418 sizeof(*rx_desc) - sizeof(u32));
424 if (skb_queue_empty(amsdu))
	/* Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */
440 return msdu_chaining;
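/* Sketch of how a caller is expected to consume the tri-state return value
 * documented above (error handling condensed; unchain()/bail are
 * hypothetical stand-ins for the real handler further down):
 *
 *	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
 *	if (ret < 0)
 *		goto bail;         // fatal, rx ring is corrupted
 *	if (ret > 0)
 *		unchain(&amsdu);   // chained msdus need reassembly
 *	// ret == 0: ordinary non-chained msdu(s)
 */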
443 static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
446 struct ath10k *ar = htt->ar;
447 struct ath10k_skb_rxcb *rxcb;
448 struct sk_buff *msdu;
450 lockdep_assert_held(&htt->rx_ring.lock);
452 msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
456 rxcb = ATH10K_SKB_RXCB(msdu);
457 hash_del(&rxcb->hlist);
458 htt->rx_ring.fill_cnt--;
460 dma_unmap_single(htt->ar->dev, rxcb->paddr,
461 msdu->len + skb_tailroom(msdu),
463 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
464 msdu->data, msdu->len + skb_tailroom(msdu));
469 static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
470 struct sk_buff *frag_list,
471 unsigned int frag_len)
473 skb_shinfo(skb_head)->frag_list = frag_list;
474 skb_head->data_len = frag_len;
475 skb_head->len += skb_head->data_len;
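/* skb length bookkeeping, for reference: after the append above,
 * skb_head->len covers linear plus fragment bytes while data_len counts
 * only the non-linear part. E.g. a 1500-byte frame split as 328 bytes in
 * the head buffer plus a 1172-byte frag_list gives:
 *
 *	skb_head->len         == 328 + 1172
 *	skb_head->data_len    == 1172
 *	skb_headlen(skb_head) == 328    // len - data_len
 */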
478 static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
479 struct sk_buff *msdu,
480 struct htt_rx_in_ord_msdu_desc **msdu_desc)
482 struct ath10k *ar = htt->ar;
484 struct sk_buff *frag_buf;
485 struct sk_buff *prev_frag_buf;
487 struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
488 struct htt_rx_desc *rxd;
489 int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
491 rxd = (void *)msdu->data;
492 trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
494 skb_put(msdu, sizeof(struct htt_rx_desc));
495 skb_pull(msdu, sizeof(struct htt_rx_desc));
496 skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
497 amsdu_len -= msdu->len;
499 last_frag = ind_desc->reserved;
502 ath10k_warn(ar, "invalid amsdu len %u, left %d",
503 __le16_to_cpu(ind_desc->msdu_len),
510 paddr = __le32_to_cpu(ind_desc->msdu_paddr);
511 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
513 ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
517 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
518 ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
520 amsdu_len -= frag_buf->len;
521 prev_frag_buf = frag_buf;
522 last_frag = ind_desc->reserved;
525 paddr = __le32_to_cpu(ind_desc->msdu_paddr);
526 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
528 ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
530 prev_frag_buf->next = NULL;
534 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
535 last_frag = ind_desc->reserved;
536 amsdu_len -= frag_buf->len;
538 prev_frag_buf->next = frag_buf;
539 prev_frag_buf = frag_buf;
543 ath10k_warn(ar, "invalid amsdu len %u, left %d",
544 __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
547 *msdu_desc = ind_desc;
549 prev_frag_buf->next = NULL;
554 ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
555 struct sk_buff *msdu,
556 struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
558 struct ath10k *ar = htt->ar;
560 struct sk_buff *frag_buf;
561 struct sk_buff *prev_frag_buf;
563 struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
564 struct htt_rx_desc *rxd;
565 int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
567 rxd = (void *)msdu->data;
568 trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
570 skb_put(msdu, sizeof(struct htt_rx_desc));
571 skb_pull(msdu, sizeof(struct htt_rx_desc));
572 skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
573 amsdu_len -= msdu->len;
575 last_frag = ind_desc->reserved;
578 ath10k_warn(ar, "invalid amsdu len %u, left %d",
579 __le16_to_cpu(ind_desc->msdu_len),
586 paddr = __le64_to_cpu(ind_desc->msdu_paddr);
587 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
589 ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
593 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
594 ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
596 amsdu_len -= frag_buf->len;
597 prev_frag_buf = frag_buf;
598 last_frag = ind_desc->reserved;
601 paddr = __le64_to_cpu(ind_desc->msdu_paddr);
602 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
604 ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
606 prev_frag_buf->next = NULL;
610 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
611 last_frag = ind_desc->reserved;
612 amsdu_len -= frag_buf->len;
614 prev_frag_buf->next = frag_buf;
615 prev_frag_buf = frag_buf;
619 ath10k_warn(ar, "invalid amsdu len %u, left %d",
620 __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
623 *msdu_desc = ind_desc;
625 prev_frag_buf->next = NULL;
629 static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
630 struct htt_rx_in_ord_ind *ev,
631 struct sk_buff_head *list)
633 struct ath10k *ar = htt->ar;
634 struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
635 struct htt_rx_desc *rxd;
636 struct sk_buff *msdu;
641 lockdep_assert_held(&htt->rx_ring.lock);
643 msdu_count = __le16_to_cpu(ev->msdu_count);
644 is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
646 while (msdu_count--) {
647 paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
649 msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
651 __skb_queue_purge(list);
655 if (!is_offload && ar->monitor_arvif) {
656 ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
659 __skb_queue_purge(list);
662 __skb_queue_tail(list, msdu);
667 __skb_queue_tail(list, msdu);
670 rxd = (void *)msdu->data;
672 trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
674 skb_put(msdu, sizeof(*rxd));
675 skb_pull(msdu, sizeof(*rxd));
676 skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
678 if (!(__le32_to_cpu(rxd->attention.flags) &
679 RX_ATTENTION_FLAGS_MSDU_DONE)) {
680 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
691 static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
692 struct htt_rx_in_ord_ind *ev,
693 struct sk_buff_head *list)
695 struct ath10k *ar = htt->ar;
696 struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
697 struct htt_rx_desc *rxd;
698 struct sk_buff *msdu;
703 lockdep_assert_held(&htt->rx_ring.lock);
705 msdu_count = __le16_to_cpu(ev->msdu_count);
706 is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
708 while (msdu_count--) {
709 paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
710 msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
712 __skb_queue_purge(list);
716 if (!is_offload && ar->monitor_arvif) {
717 ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
720 __skb_queue_purge(list);
723 __skb_queue_tail(list, msdu);
728 __skb_queue_tail(list, msdu);
731 rxd = (void *)msdu->data;
733 trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
735 skb_put(msdu, sizeof(*rxd));
736 skb_pull(msdu, sizeof(*rxd));
737 skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
739 if (!(__le32_to_cpu(rxd->attention.flags) &
740 RX_ATTENTION_FLAGS_MSDU_DONE)) {
741 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
752 int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
754 struct ath10k *ar = htt->ar;
756 void *vaddr, *vaddr_ring;
758 struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
760 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
763 htt->rx_confused = false;
765 /* XXX: The fill level could be changed during runtime in response to
766 * the host processing latency. Is this really worth it?
768 htt->rx_ring.size = HTT_RX_RING_SIZE;
769 htt->rx_ring.size_mask = htt->rx_ring.size - 1;
770 htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
772 if (!is_power_of_2(htt->rx_ring.size)) {
773 ath10k_warn(ar, "htt rx ring size is not power of 2\n");
777 htt->rx_ring.netbufs_ring =
778 kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
780 if (!htt->rx_ring.netbufs_ring)
783 size = ath10k_htt_get_rx_ring_size(htt);
785 vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
789 ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
790 htt->rx_ring.base_paddr = paddr;
792 vaddr = dma_alloc_coherent(htt->ar->dev,
793 sizeof(*htt->rx_ring.alloc_idx.vaddr),
798 htt->rx_ring.alloc_idx.vaddr = vaddr;
799 htt->rx_ring.alloc_idx.paddr = paddr;
800 htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
801 *htt->rx_ring.alloc_idx.vaddr = 0;
803 /* Initialize the Rx refill retry timer */
804 timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);
806 spin_lock_init(&htt->rx_ring.lock);
808 htt->rx_ring.fill_cnt = 0;
809 htt->rx_ring.sw_rd_idx.msdu_payld = 0;
810 hash_init(htt->rx_ring.skb_table);
812 skb_queue_head_init(&htt->rx_msdus_q);
813 skb_queue_head_init(&htt->rx_in_ord_compl_q);
814 skb_queue_head_init(&htt->tx_fetch_ind_q);
815 atomic_set(&htt->num_mpdus_ready, 0);
817 ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
818 htt->rx_ring.size, htt->rx_ring.fill_level);
822 dma_free_coherent(htt->ar->dev,
823 ath10k_htt_get_rx_ring_size(htt),
825 htt->rx_ring.base_paddr);
827 kfree(htt->rx_ring.netbufs_ring);
832 static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
833 enum htt_rx_mpdu_encrypt_type type)
836 case HTT_RX_MPDU_ENCRYPT_NONE:
838 case HTT_RX_MPDU_ENCRYPT_WEP40:
839 case HTT_RX_MPDU_ENCRYPT_WEP104:
840 return IEEE80211_WEP_IV_LEN;
841 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
842 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
843 return IEEE80211_TKIP_IV_LEN;
844 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
845 return IEEE80211_CCMP_HDR_LEN;
846 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
847 return IEEE80211_CCMP_256_HDR_LEN;
848 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
849 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
850 return IEEE80211_GCMP_HDR_LEN;
851 case HTT_RX_MPDU_ENCRYPT_WEP128:
852 case HTT_RX_MPDU_ENCRYPT_WAPI:
856 ath10k_warn(ar, "unsupported encryption type %d\n", type);
860 #define MICHAEL_MIC_LEN 8
862 static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
863 enum htt_rx_mpdu_encrypt_type type)
866 case HTT_RX_MPDU_ENCRYPT_NONE:
867 case HTT_RX_MPDU_ENCRYPT_WEP40:
868 case HTT_RX_MPDU_ENCRYPT_WEP104:
869 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
870 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
872 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
873 return IEEE80211_CCMP_MIC_LEN;
874 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
875 return IEEE80211_CCMP_256_MIC_LEN;
876 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
877 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
878 return IEEE80211_GCMP_MIC_LEN;
879 case HTT_RX_MPDU_ENCRYPT_WEP128:
880 case HTT_RX_MPDU_ENCRYPT_WAPI:
884 ath10k_warn(ar, "unsupported encryption type %d\n", type);
888 static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
889 enum htt_rx_mpdu_encrypt_type type)
892 case HTT_RX_MPDU_ENCRYPT_NONE:
893 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
894 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
895 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
896 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
898 case HTT_RX_MPDU_ENCRYPT_WEP40:
899 case HTT_RX_MPDU_ENCRYPT_WEP104:
900 return IEEE80211_WEP_ICV_LEN;
901 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
902 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
903 return IEEE80211_TKIP_ICV_LEN;
904 case HTT_RX_MPDU_ENCRYPT_WEP128:
905 case HTT_RX_MPDU_ENCRYPT_WAPI:
909 ath10k_warn(ar, "unsupported encryption type %d\n", type);
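/* Worked example tying the three helpers above together (this combined
 * helper is illustrative only, not something the driver defines): for CCMP
 * the per-MPDU overhead is 8 (hdr) + 8 (MIC) + 0 (ICV) = 16 bytes; for
 * TKIP it is 8 (IV) + 0 + 4 (ICV), with the 8-byte Michael MIC
 * (MICHAEL_MIC_LEN) stripped separately in the raw undecap path below.
 *
 *	static int ath10k_htt_rx_crypto_total_len(struct ath10k *ar,
 *				enum htt_rx_mpdu_encrypt_type type)
 *	{
 *		return ath10k_htt_rx_crypto_param_len(ar, type) +
 *		       ath10k_htt_rx_crypto_mic_len(ar, type) +
 *		       ath10k_htt_rx_crypto_icv_len(ar, type);
 *	}
 */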
struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;
919 #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
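/* Per IEEE 802.11ac, the Group ID field in VHT-SIG-A1 uses the values 0
 * and 63 for SU PPDUs (to an AP and to a non-AP STA, respectively), while
 * 1..62 identify MU-MIMO groups. That is why the MU branch below cannot
 * recover MCS/NSS without the group membership state the firmware keeps
 * to itself.
 */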
921 static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
927 ret = RATE_INFO_BW_20;
930 ret = RATE_INFO_BW_40;
933 ret = RATE_INFO_BW_80;
936 ret = RATE_INFO_BW_160;
943 static void ath10k_htt_rx_h_rates(struct ath10k *ar,
944 struct ieee80211_rx_status *status,
945 struct htt_rx_desc *rxd)
947 struct ieee80211_supported_band *sband;
948 u8 cck, rate, bw, sgi, mcs, nss;
951 u32 info1, info2, info3;
953 info1 = __le32_to_cpu(rxd->ppdu_start.info1);
954 info2 = __le32_to_cpu(rxd->ppdu_start.info2);
955 info3 = __le32_to_cpu(rxd->ppdu_start.info3);
957 preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
		/* The band is required to get the legacy rate index. Since
		 * the band can't be undefined, check that freq is non-zero.
		 */
967 cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
968 rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
969 rate &= ~RX_PPDU_START_RATE_FLAG;
971 sband = &ar->mac.sbands[status->band];
972 status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
975 case HTT_RX_HT_WITH_TXBF:
976 /* HT-SIG - Table 20-11 in info2 and info3 */
979 bw = (info2 >> 7) & 1;
980 sgi = (info3 >> 7) & 1;
982 status->rate_idx = mcs;
983 status->encoding = RX_ENC_HT;
985 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
987 status->bw = RATE_INFO_BW_40;
990 case HTT_RX_VHT_WITH_TXBF:
991 /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
996 group_id = (info2 >> 4) & 0x3F;
998 if (GROUP_ID_IS_SU_MIMO(group_id)) {
999 mcs = (info3 >> 4) & 0x0F;
1000 nss = ((info2 >> 10) & 0x07) + 1;
			/* Hardware doesn't decode VHT-SIG-B into the Rx
			 * descriptor so it's impossible to decode MCS. Also
			 * since firmware consumes Group Id Management frames
			 * the host has no knowledge regarding group/user
			 * position mapping so it's impossible to pick the
			 * correct Nsts from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on a best-effort basis.
			 */
1017 ath10k_warn(ar, "invalid MCS received %u\n", mcs);
1018 ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
1019 __le32_to_cpu(rxd->attention.flags),
1020 __le32_to_cpu(rxd->mpdu_start.info0),
1021 __le32_to_cpu(rxd->mpdu_start.info1),
1022 __le32_to_cpu(rxd->msdu_start.common.info0),
1023 __le32_to_cpu(rxd->msdu_start.common.info1),
1024 rxd->ppdu_start.info0,
1025 __le32_to_cpu(rxd->ppdu_start.info1),
1026 __le32_to_cpu(rxd->ppdu_start.info2),
1027 __le32_to_cpu(rxd->ppdu_start.info3),
1028 __le32_to_cpu(rxd->ppdu_start.info4));
1030 ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
1031 __le32_to_cpu(rxd->msdu_end.common.info0),
1032 __le32_to_cpu(rxd->mpdu_end.info0));
1034 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
1035 "rx desc msdu payload: ",
1036 rxd->msdu_payload, 50);
1039 status->rate_idx = mcs;
1043 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
1045 status->bw = ath10k_bw_to_mac80211_bw(bw);
1046 status->encoding = RX_ENC_VHT;
1053 static struct ieee80211_channel *
1054 ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
1056 struct ath10k_peer *peer;
1057 struct ath10k_vif *arvif;
1058 struct cfg80211_chan_def def;
1061 lockdep_assert_held(&ar->data_lock);
1066 if (rxd->attention.flags &
1067 __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
1070 if (!(rxd->msdu_end.common.info0 &
1071 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
1074 peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
1075 RX_MPDU_START_INFO0_PEER_IDX);
1077 peer = ath10k_peer_find_by_id(ar, peer_id);
1081 arvif = ath10k_get_arvif(ar, peer->vdev_id);
1082 if (WARN_ON_ONCE(!arvif))
1085 if (ath10k_mac_vif_chan(arvif->vif, &def))
1091 static struct ieee80211_channel *
1092 ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
1094 struct ath10k_vif *arvif;
1095 struct cfg80211_chan_def def;
1097 lockdep_assert_held(&ar->data_lock);
1099 list_for_each_entry(arvif, &ar->arvifs, list) {
1100 if (arvif->vdev_id == vdev_id &&
1101 ath10k_mac_vif_chan(arvif->vif, &def) == 0)
1109 ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
1110 struct ieee80211_chanctx_conf *conf,
1113 struct cfg80211_chan_def *def = data;
1118 static struct ieee80211_channel *
1119 ath10k_htt_rx_h_any_channel(struct ath10k *ar)
1121 struct cfg80211_chan_def def = {};
1123 ieee80211_iter_chan_contexts_atomic(ar->hw,
1124 ath10k_htt_rx_h_any_chan_iter,
1130 static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
1131 struct ieee80211_rx_status *status,
1132 struct htt_rx_desc *rxd,
1135 struct ieee80211_channel *ch;
1137 spin_lock_bh(&ar->data_lock);
1138 ch = ar->scan_channel;
1140 ch = ar->rx_channel;
1142 ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
1144 ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
1146 ch = ath10k_htt_rx_h_any_channel(ar);
1148 ch = ar->tgt_oper_chan;
1149 spin_unlock_bh(&ar->data_lock);
1154 status->band = ch->band;
1155 status->freq = ch->center_freq;
1160 static void ath10k_htt_rx_h_signal(struct ath10k *ar,
1161 struct ieee80211_rx_status *status,
1162 struct htt_rx_desc *rxd)
1166 for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
1167 status->chains &= ~BIT(i);
1169 if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
1170 status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
1171 rxd->ppdu_start.rssi_chains[i].pri20_mhz;
1173 status->chains |= BIT(i);
1177 /* FIXME: Get real NF */
1178 status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
1179 rxd->ppdu_start.rssi_comb;
1180 status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
1183 static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
1184 struct ieee80211_rx_status *status,
1185 struct htt_rx_desc *rxd)
1187 /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
1188 * means all prior MSDUs in a PPDU are reported to mac80211 without the
1189 * TSF. Is it worth holding frames until end of PPDU is known?
1191 * FIXME: Can we get/compute 64bit TSF?
1193 status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
1194 status->flag |= RX_FLAG_MACTIME_END;
1197 static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
1198 struct sk_buff_head *amsdu,
1199 struct ieee80211_rx_status *status,
1202 struct sk_buff *first;
1203 struct htt_rx_desc *rxd;
1207 if (skb_queue_empty(amsdu))
1210 first = skb_peek(amsdu);
1211 rxd = (void *)first->data - sizeof(*rxd);
1213 is_first_ppdu = !!(rxd->attention.flags &
1214 __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
1215 is_last_ppdu = !!(rxd->attention.flags &
1216 __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
1218 if (is_first_ppdu) {
1219 /* New PPDU starts so clear out the old per-PPDU status. */
1221 status->rate_idx = 0;
1223 status->encoding = RX_ENC_LEGACY;
1224 status->bw = RATE_INFO_BW_20;
1226 status->flag &= ~RX_FLAG_MACTIME_END;
1227 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1229 status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
1230 status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
1231 status->ampdu_reference = ar->ampdu_reference;
1233 ath10k_htt_rx_h_signal(ar, status, rxd);
1234 ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
1235 ath10k_htt_rx_h_rates(ar, status, rxd);
1239 ath10k_htt_rx_h_mactime(ar, status, rxd);
1241 /* set ampdu last segment flag */
1242 status->flag |= RX_FLAG_AMPDU_IS_LAST;
1243 ar->ampdu_reference++;
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};
1258 static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
1263 if (!ieee80211_is_data_qos(hdr->frame_control))
1266 qc = ieee80211_get_qos_ctl(hdr);
1267 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}
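/* Usage sketch: callers pass a small stack buffer, e.g.
 *
 *	char tid_buf[32];
 *	ath10k_dbg(ar, ATH10K_DBG_DATA, "... %s ...",
 *		   ath10k_get_tid(hdr, tid_buf, sizeof(tid_buf)));
 *
 * (see the rx debug print further down); non-QoS data frames take the
 * early return above and leave the buffer untouched.
 */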
1276 static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
1277 struct ieee80211_rx_status *rx_status,
1278 struct sk_buff *skb)
1280 struct ieee80211_rx_status *status;
1282 status = IEEE80211_SKB_RXCB(skb);
1283 *status = *rx_status;
1285 skb_queue_tail(&ar->htt.rx_msdus_q, skb);
1288 static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
1290 struct ieee80211_rx_status *status;
1291 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1294 status = IEEE80211_SKB_RXCB(skb);
1296 if (!(ar->filter_flags & FIF_FCSFAIL) &&
1297 status->flag & RX_FLAG_FAILED_FCS_CRC) {
1298 ar->stats.rx_crc_err_drop++;
1299 dev_kfree_skb_any(skb);
1303 ath10k_dbg(ar, ATH10K_DBG_DATA,
1304 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
1307 ieee80211_get_SA(hdr),
1308 ath10k_get_tid(hdr, tid, sizeof(tid)),
1309 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
1311 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
1312 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
1313 (status->encoding == RX_ENC_HT) ? "ht" : "",
1314 (status->encoding == RX_ENC_VHT) ? "vht" : "",
1315 (status->bw == RATE_INFO_BW_40) ? "40" : "",
1316 (status->bw == RATE_INFO_BW_80) ? "80" : "",
1317 (status->bw == RATE_INFO_BW_160) ? "160" : "",
1318 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
1322 status->band, status->flag,
1323 !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
1324 !!(status->flag & RX_FLAG_MMIC_ERROR),
1325 !!(status->flag & RX_FLAG_AMSDU_MORE));
1326 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
1327 skb->data, skb->len);
1328 trace_ath10k_rx_hdr(ar, skb->data, skb->len);
1329 trace_ath10k_rx_payload(ar, skb->data, skb->len);
1331 ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
1334 static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
1335 struct ieee80211_hdr *hdr)
1337 int len = ieee80211_hdrlen(hdr->frame_control);
1339 if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
1340 ar->running_fw->fw_file.fw_features))
1341 len = round_up(len, 4);
1346 static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
1347 struct sk_buff *msdu,
1348 struct ieee80211_rx_status *status,
1349 enum htt_rx_mpdu_encrypt_type enctype,
1351 const u8 first_hdr[64])
1353 struct ieee80211_hdr *hdr;
1354 struct htt_rx_desc *rxd;
1359 bool msdu_limit_err;
1360 int bytes_aligned = ar->hw_params.decap_align_bytes;
1363 rxd = (void *)msdu->data - sizeof(*rxd);
1364 is_first = !!(rxd->msdu_end.common.info0 &
1365 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1366 is_last = !!(rxd->msdu_end.common.info0 &
1367 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */
	/* Some hardware (QCA99x0 variants) can limit the number of MSDUs it
	 * deaggregates from an A-MSDU, so that unwanted MSDU-deaggregation
	 * is avoided for error packets. If the limit is exceeded, the HW
	 * sends all remaining MSDUs as a single last MSDU with the msdu
	 * limit error bit set.
	 */
1384 msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd);
	/* If an MSDU limit error occurred, don't warn: a partial raw MSDU
	 * without the first MSDU is expected in that case, and is handled
	 * later below.
	 */
1389 /* This probably shouldn't happen but warn just in case */
1390 if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
1393 /* This probably shouldn't happen but warn just in case */
1394 if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
1397 skb_trim(msdu, msdu->len - FCS_LEN);
1399 /* Push original 80211 header */
1400 if (unlikely(msdu_limit_err)) {
1401 hdr = (struct ieee80211_hdr *)first_hdr;
1402 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1403 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1405 if (ieee80211_is_data_qos(hdr->frame_control)) {
1406 qos = ieee80211_get_qos_ctl(hdr);
1407 qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1411 memcpy(skb_push(msdu, crypto_len),
1412 (void *)hdr + round_up(hdr_len, bytes_aligned),
1415 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1418 /* In most cases this will be true for sniffed frames. It makes sense
1419 * to deliver them as-is without stripping the crypto param. This is
1420 * necessary for software based decryption.
1422 * If there's no error then the frame is decrypted. At least that is
1423 * the case for frames that come in via fragmented rx indication.
	/* The payload is decrypted so strip crypto params. Start from the
	 * tail since the header is still needed to compute trim lengths.
	 */
1432 hdr = (void *)msdu->data;
1435 if (status->flag & RX_FLAG_IV_STRIPPED) {
1436 skb_trim(msdu, msdu->len -
1437 ath10k_htt_rx_crypto_mic_len(ar, enctype));
1439 skb_trim(msdu, msdu->len -
1440 ath10k_htt_rx_crypto_icv_len(ar, enctype));
1443 if (status->flag & RX_FLAG_MIC_STRIPPED)
1444 skb_trim(msdu, msdu->len -
1445 ath10k_htt_rx_crypto_mic_len(ar, enctype));
1448 if (status->flag & RX_FLAG_ICV_STRIPPED)
1449 skb_trim(msdu, msdu->len -
1450 ath10k_htt_rx_crypto_icv_len(ar, enctype));
1454 if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
1455 !ieee80211_has_morefrags(hdr->frame_control) &&
1456 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
1457 skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
1460 if (status->flag & RX_FLAG_IV_STRIPPED) {
1461 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1462 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1464 memmove((void *)msdu->data + crypto_len,
1465 (void *)msdu->data, hdr_len);
1466 skb_pull(msdu, crypto_len);
1470 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1471 struct sk_buff *msdu,
1472 struct ieee80211_rx_status *status,
1473 const u8 first_hdr[64],
1474 enum htt_rx_mpdu_encrypt_type enctype)
1476 struct ieee80211_hdr *hdr;
1477 struct htt_rx_desc *rxd;
1482 int bytes_aligned = ar->hw_params.decap_align_bytes;
1484 /* Delivered decapped frame:
1485 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
1488 * Note: The nwifi header doesn't have QoS Control and is
1489 * (always?) a 3addr frame.
	 * Note2: There's no A-MSDU subframe header, even if the frame was
	 * part of an A-MSDU.
	 */
1495 /* pull decapped header and copy SA & DA */
1496 rxd = (void *)msdu->data - sizeof(*rxd);
1498 l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1499 skb_put(msdu, l3_pad_bytes);
1501 hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
1503 hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
1504 ether_addr_copy(da, ieee80211_get_DA(hdr));
1505 ether_addr_copy(sa, ieee80211_get_SA(hdr));
1506 skb_pull(msdu, hdr_len);
1508 /* push original 802.11 header */
1509 hdr = (struct ieee80211_hdr *)first_hdr;
1510 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1512 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1513 memcpy(skb_push(msdu,
1514 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1515 (void *)hdr + round_up(hdr_len, bytes_aligned),
1516 ath10k_htt_rx_crypto_param_len(ar, enctype));
1519 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1521 /* original 802.11 header has a different DA and in
1522 * case of 4addr it may also have different SA
1524 hdr = (struct ieee80211_hdr *)msdu->data;
1525 ether_addr_copy(ieee80211_get_DA(hdr), da);
1526 ether_addr_copy(ieee80211_get_SA(hdr), sa);
1529 static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
1530 struct sk_buff *msdu,
1531 enum htt_rx_mpdu_encrypt_type enctype)
1533 struct ieee80211_hdr *hdr;
1534 struct htt_rx_desc *rxd;
1535 size_t hdr_len, crypto_len;
1537 bool is_first, is_last, is_amsdu;
1538 int bytes_aligned = ar->hw_params.decap_align_bytes;
1540 rxd = (void *)msdu->data - sizeof(*rxd);
1541 hdr = (void *)rxd->rx_hdr_status;
1543 is_first = !!(rxd->msdu_end.common.info0 &
1544 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1545 is_last = !!(rxd->msdu_end.common.info0 &
1546 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
1547 is_amsdu = !(is_first && is_last);
1552 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1553 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1555 rfc1042 += round_up(hdr_len, bytes_aligned) +
1556 round_up(crypto_len, bytes_aligned);
1560 rfc1042 += sizeof(struct amsdu_subframe_hdr);
1565 static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1566 struct sk_buff *msdu,
1567 struct ieee80211_rx_status *status,
1568 const u8 first_hdr[64],
1569 enum htt_rx_mpdu_encrypt_type enctype)
1571 struct ieee80211_hdr *hdr;
1578 struct htt_rx_desc *rxd;
1579 int bytes_aligned = ar->hw_params.decap_align_bytes;
1581 /* Delivered decapped frame:
1582 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
1586 rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
1587 if (WARN_ON_ONCE(!rfc1042))
1590 rxd = (void *)msdu->data - sizeof(*rxd);
1591 l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1592 skb_put(msdu, l3_pad_bytes);
1593 skb_pull(msdu, l3_pad_bytes);
1595 /* pull decapped header and copy SA & DA */
1596 eth = (struct ethhdr *)msdu->data;
1597 ether_addr_copy(da, eth->h_dest);
1598 ether_addr_copy(sa, eth->h_source);
1599 skb_pull(msdu, sizeof(struct ethhdr));
1601 /* push rfc1042/llc/snap */
1602 memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
1603 sizeof(struct rfc1042_hdr));
1605 /* push original 802.11 header */
1606 hdr = (struct ieee80211_hdr *)first_hdr;
1607 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1609 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1610 memcpy(skb_push(msdu,
1611 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1612 (void *)hdr + round_up(hdr_len, bytes_aligned),
1613 ath10k_htt_rx_crypto_param_len(ar, enctype));
1616 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1618 /* original 802.11 header has a different DA and in
1619 * case of 4addr it may also have different SA
1621 hdr = (struct ieee80211_hdr *)msdu->data;
1622 ether_addr_copy(ieee80211_get_DA(hdr), da);
1623 ether_addr_copy(ieee80211_get_SA(hdr), sa);
1626 static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1627 struct sk_buff *msdu,
1628 struct ieee80211_rx_status *status,
1629 const u8 first_hdr[64],
1630 enum htt_rx_mpdu_encrypt_type enctype)
1632 struct ieee80211_hdr *hdr;
1635 struct htt_rx_desc *rxd;
1636 int bytes_aligned = ar->hw_params.decap_align_bytes;
1638 /* Delivered decapped frame:
1639 * [amsdu header] <-- replaced with 802.11 hdr
1644 rxd = (void *)msdu->data - sizeof(*rxd);
1645 l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1647 skb_put(msdu, l3_pad_bytes);
1648 skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
1650 hdr = (struct ieee80211_hdr *)first_hdr;
1651 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1653 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1654 memcpy(skb_push(msdu,
1655 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1656 (void *)hdr + round_up(hdr_len, bytes_aligned),
1657 ath10k_htt_rx_crypto_param_len(ar, enctype));
1660 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1663 static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
1664 struct sk_buff *msdu,
1665 struct ieee80211_rx_status *status,
1667 enum htt_rx_mpdu_encrypt_type enctype,
1670 struct htt_rx_desc *rxd;
1671 enum rx_msdu_decap_format decap;
1673 /* First msdu's decapped header:
1674 * [802.11 header] <-- padded to 4 bytes long
1675 * [crypto param] <-- padded to 4 bytes long
1676 * [amsdu header] <-- only if A-MSDU
1679 * Other (2nd, 3rd, ..) msdu's decapped header:
1680 * [amsdu header] <-- only if A-MSDU
1684 rxd = (void *)msdu->data - sizeof(*rxd);
1685 decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
1686 RX_MSDU_START_INFO1_DECAP_FORMAT);
1689 case RX_MSDU_DECAP_RAW:
1690 ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
1691 is_decrypted, first_hdr);
1693 case RX_MSDU_DECAP_NATIVE_WIFI:
1694 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
1697 case RX_MSDU_DECAP_ETHERNET2_DIX:
1698 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
1700 case RX_MSDU_DECAP_8023_SNAP_LLC:
1701 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
1707 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
1709 struct htt_rx_desc *rxd;
1711 bool is_ip4, is_ip6;
1712 bool is_tcp, is_udp;
1713 bool ip_csum_ok, tcpudp_csum_ok;
1715 rxd = (void *)skb->data - sizeof(*rxd);
1716 flags = __le32_to_cpu(rxd->attention.flags);
1717 info = __le32_to_cpu(rxd->msdu_start.common.info1);
1719 is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
1720 is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
1721 is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
1722 is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
1723 ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
1724 tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
1726 if (!is_ip4 && !is_ip6)
1727 return CHECKSUM_NONE;
1728 if (!is_tcp && !is_udp)
1729 return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
1732 if (!tcpudp_csum_ok)
1733 return CHECKSUM_NONE;
1735 return CHECKSUM_UNNECESSARY;
1738 static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
1740 msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
1743 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1744 struct sk_buff_head *amsdu,
1745 struct ieee80211_rx_status *status,
1746 bool fill_crypt_header,
1748 enum ath10k_pkt_rx_err *err)
1750 struct sk_buff *first;
1751 struct sk_buff *last;
1752 struct sk_buff *msdu;
1753 struct htt_rx_desc *rxd;
1754 struct ieee80211_hdr *hdr;
1755 enum htt_rx_mpdu_encrypt_type enctype;
1759 bool has_crypto_err;
1761 bool has_peer_idx_invalid;
1766 if (skb_queue_empty(amsdu))
1769 first = skb_peek(amsdu);
1770 rxd = (void *)first->data - sizeof(*rxd);
1772 is_mgmt = !!(rxd->attention.flags &
1773 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
1775 enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
1776 RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1778 /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
1779 * decapped header. It'll be used for undecapping of each MSDU.
1781 hdr = (void *)rxd->rx_hdr_status;
1782 memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1785 memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1787 /* Each A-MSDU subframe will use the original header as the base and be
1788 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1790 hdr = (void *)first_hdr;
1792 if (ieee80211_is_data_qos(hdr->frame_control)) {
1793 qos = ieee80211_get_qos_ctl(hdr);
1794 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1797 /* Some attention flags are valid only in the last MSDU. */
1798 last = skb_peek_tail(amsdu);
1799 rxd = (void *)last->data - sizeof(*rxd);
1800 attention = __le32_to_cpu(rxd->attention.flags);
1802 has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
1803 has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
1804 has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
1805 has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
1807 /* Note: If hardware captures an encrypted frame that it can't decrypt,
1808 * e.g. due to fcs error, missing peer or invalid key data it will
1809 * report the frame as raw.
1811 is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
1814 !has_peer_idx_invalid);
1816 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
1817 status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
1818 RX_FLAG_MMIC_ERROR |
1820 RX_FLAG_IV_STRIPPED |
1821 RX_FLAG_ONLY_MONITOR |
1822 RX_FLAG_MMIC_STRIPPED);
1825 status->flag |= RX_FLAG_FAILED_FCS_CRC;
1828 status->flag |= RX_FLAG_MMIC_ERROR;
1832 *err = ATH10K_PKT_RX_ERR_FCS;
1833 else if (has_tkip_err)
1834 *err = ATH10K_PKT_RX_ERR_TKIP;
1835 else if (has_crypto_err)
1836 *err = ATH10K_PKT_RX_ERR_CRYPT;
1837 else if (has_peer_idx_invalid)
1838 *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
1847 status->flag |= RX_FLAG_ONLY_MONITOR;
1850 status->flag |= RX_FLAG_DECRYPTED;
1852 if (likely(!is_mgmt))
1853 status->flag |= RX_FLAG_MMIC_STRIPPED;
1855 if (fill_crypt_header)
1856 status->flag |= RX_FLAG_MIC_STRIPPED |
1857 RX_FLAG_ICV_STRIPPED;
1859 status->flag |= RX_FLAG_IV_STRIPPED;
1862 skb_queue_walk(amsdu, msdu) {
1863 ath10k_htt_rx_h_csum_offload(msdu);
1864 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
1867 /* Undecapping involves copying the original 802.11 header back
1868 * to sk_buff. If frame is protected and hardware has decrypted
1869 * it then remove the protected bit.
1876 if (fill_crypt_header)
1879 hdr = (void *)msdu->data;
1880 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1884 static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
1885 struct sk_buff_head *amsdu,
1886 struct ieee80211_rx_status *status)
1888 struct sk_buff *msdu;
1889 struct sk_buff *first_subframe;
1891 first_subframe = skb_peek(amsdu);
1893 while ((msdu = __skb_dequeue(amsdu))) {
1894 /* Setup per-MSDU flags */
1895 if (skb_queue_empty(amsdu))
1896 status->flag &= ~RX_FLAG_AMSDU_MORE;
1898 status->flag |= RX_FLAG_AMSDU_MORE;
1900 if (msdu == first_subframe) {
1901 first_subframe = NULL;
1902 status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
1904 status->flag |= RX_FLAG_ALLOW_SAME_PN;
1907 ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
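	/* Why RX_FLAG_ALLOW_SAME_PN above: hardware reports every A-MSDU
	 * subframe as a separate MSDU, and all subframes of one A-MSDU share
	 * the PN of the enclosing MPDU. mac80211's replay check would drop
	 * subframes 2..n as replays of subframe 1, so the flag is cleared
	 * only for the first subframe and set for the rest.
	 */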
1911 static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
1912 unsigned long *unchain_cnt)
1914 struct sk_buff *skb, *first;
1917 int amsdu_len = skb_queue_len(amsdu);
	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or a similar method to decrease
	 * copying, or maybe get mac80211 to provide a way to
	 * just receive a list of packets.
	 */
1926 first = __skb_dequeue(amsdu);
1928 /* Allocate total length all at once. */
1929 skb_queue_walk(amsdu, skb)
1930 total_len += skb->len;
	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
1935 /* TODO: bump some rx-oom error stat */
1936 /* put it back together so we can free the
1937 * whole list at once.
1939 __skb_queue_head(amsdu, first);
	/* Walk list again, copying contents into the first skb */
1946 while ((skb = __skb_dequeue(amsdu))) {
1947 skb_copy_from_linear_data(skb, skb_put(first, skb->len),
1949 dev_kfree_skb_any(skb);
1952 __skb_queue_head(amsdu, first);
1954 *unchain_cnt += amsdu_len - 1;
1959 static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
1960 struct sk_buff_head *amsdu,
1961 unsigned long *drop_cnt,
1962 unsigned long *unchain_cnt)
1964 struct sk_buff *first;
1965 struct htt_rx_desc *rxd;
1966 enum rx_msdu_decap_format decap;
1968 first = skb_peek(amsdu);
1969 rxd = (void *)first->data - sizeof(*rxd);
1970 decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
1971 RX_MSDU_START_INFO1_DECAP_FORMAT);
1973 /* FIXME: Current unchaining logic can only handle simple case of raw
1974 * msdu chaining. If decapping is other than raw the chaining may be
1975 * more complex and this isn't handled by the current code. Don't even
1976 * try re-constructing such frames - it'll be pretty much garbage.
1978 if (decap != RX_MSDU_DECAP_RAW ||
1979 skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
1980 *drop_cnt += skb_queue_len(amsdu);
1981 __skb_queue_purge(amsdu);
1985 ath10k_unchain_msdu(amsdu, unchain_cnt);
1988 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1989 struct sk_buff_head *amsdu,
1990 struct ieee80211_rx_status *rx_status)
1992 /* FIXME: It might be a good idea to do some fuzzy-testing to drop
1993 * invalid/dangerous frames.
1996 if (!rx_status->freq) {
1997 ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
2001 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
2002 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
2009 static void ath10k_htt_rx_h_filter(struct ath10k *ar,
2010 struct sk_buff_head *amsdu,
2011 struct ieee80211_rx_status *rx_status,
2012 unsigned long *drop_cnt)
2014 if (skb_queue_empty(amsdu))
2017 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
2021 *drop_cnt += skb_queue_len(amsdu);
2023 __skb_queue_purge(amsdu);
2026 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
2028 struct ath10k *ar = htt->ar;
2029 struct ieee80211_rx_status *rx_status = &htt->rx_status;
2030 struct sk_buff_head amsdu;
2032 unsigned long drop_cnt = 0;
2033 unsigned long unchain_cnt = 0;
2034 unsigned long drop_cnt_filter = 0;
2035 unsigned long msdus_to_queue, num_msdus;
2036 enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
2037 u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
2039 __skb_queue_head_init(&amsdu);
2041 spin_lock_bh(&htt->rx_ring.lock);
2042 if (htt->rx_confused) {
2043 spin_unlock_bh(&htt->rx_ring.lock);
2046 ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
2047 spin_unlock_bh(&htt->rx_ring.lock);
2050 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
2051 __skb_queue_purge(&amsdu);
2052 /* FIXME: It's probably a good idea to reboot the
2053 * device instead of leaving it inoperable.
2055 htt->rx_confused = true;
2059 num_msdus = skb_queue_len(&amsdu);
2061 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
	/* ret == 1 indicates chained msdus */
2065 ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
2067 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
2068 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
2069 msdus_to_queue = skb_queue_len(&amsdu);
2070 ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
2072 ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
2073 unchain_cnt, drop_cnt, drop_cnt_filter,
2079 static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
2080 union htt_rx_pn_t *pn,
2083 switch (pn_len_bits) {
	case 48:
		pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
			   ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
		break;
	case 24:
		pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
		break;
	}
}
2094 static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
2095 union htt_rx_pn_t *old_pn)
2097 return ((new_pn->pn48 & 0xffffffffffffULL) <=
2098 (old_pn->pn48 & 0xffffffffffffULL));
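/* Worked example of the 48-bit comparison above: with an old PN of
 * 0x000000000105, a new PN of 0x000000000105 or 0x000000000100 compares
 * <= and is flagged as a replay, while 0x000000000106 passes. Only the
 * low 48 bits take part, matching the CCMP/TKIP PN width.
 */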
2101 static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
2102 struct ath10k_peer *peer,
2103 struct htt_rx_indication_hl *rx)
2105 bool last_pn_valid, pn_invalid = false;
2106 enum htt_txrx_sec_cast_type sec_index;
2107 enum htt_security_types sec_type;
2108 union htt_rx_pn_t new_pn = {0};
2109 struct htt_hl_rx_desc *rx_desc;
2110 union htt_rx_pn_t *last_pn;
2111 u32 rx_desc_info, tid;
2112 int num_mpdu_ranges;
2114 lockdep_assert_held(&ar->data_lock);
2119 if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
2122 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2123 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2125 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2126 rx_desc_info = __le32_to_cpu(rx_desc->info);
2128 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
2131 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2132 last_pn_valid = peer->tids_last_pn_valid[tid];
2133 last_pn = &peer->tids_last_pn[tid];
2135 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2136 sec_index = HTT_TXRX_SEC_MCAST;
2138 sec_index = HTT_TXRX_SEC_UCAST;
2140 sec_type = peer->rx_pn[sec_index].sec_type;
2141 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2143 if (sec_type != HTT_SECURITY_AES_CCMP &&
2144 sec_type != HTT_SECURITY_TKIP &&
2145 sec_type != HTT_SECURITY_TKIP_NOMIC)
2149 pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
2151 peer->tids_last_pn_valid[tid] = true;
2154 last_pn->pn48 = new_pn.pn48;
2159 static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
2160 struct htt_rx_indication_hl *rx,
2161 struct sk_buff *skb,
2162 enum htt_rx_pn_check_type check_pn_type,
2163 enum htt_rx_tkip_demic_type tkip_mic_type)
2165 struct ath10k *ar = htt->ar;
2166 struct ath10k_peer *peer;
2167 struct htt_rx_indication_mpdu_range *mpdu_ranges;
2168 struct fw_rx_desc_hl *fw_desc;
2169 enum htt_txrx_sec_cast_type sec_index;
2170 enum htt_security_types sec_type;
2171 union htt_rx_pn_t new_pn = {0};
2172 struct htt_hl_rx_desc *rx_desc;
2173 struct ieee80211_hdr *hdr;
2174 struct ieee80211_rx_status *rx_status;
2177 int num_mpdu_ranges;
2179 struct ieee80211_channel *ch;
2180 bool pn_invalid, qos, first_msdu;
2181 u32 tid, rx_desc_info;
2183 peer_id = __le16_to_cpu(rx->hdr.peer_id);
2184 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2186 spin_lock_bh(&ar->data_lock);
2187 peer = ath10k_peer_find_by_id(ar, peer_id);
2188 spin_unlock_bh(&ar->data_lock);
2189 if (!peer && peer_id != HTT_INVALID_PEERID)
2190 ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
2195 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2196 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2197 mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
2198 fw_desc = &rx->fw_desc;
2199 rx_desc_len = fw_desc->len;
	/* I have not yet seen any case where num_mpdu_ranges > 1.
	 * qcacld does not seem to handle that case either, so we introduce
	 * the same limitation here as well.
	 */
2205 if (num_mpdu_ranges > 1)
2207 "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
2210 if (mpdu_ranges->mpdu_range_status !=
2211 HTT_RX_IND_MPDU_STATUS_OK &&
2212 mpdu_ranges->mpdu_range_status !=
2213 HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
2214 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
2215 mpdu_ranges->mpdu_range_status);
2219 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2220 rx_desc_info = __le32_to_cpu(rx_desc->info);
2222 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2223 sec_index = HTT_TXRX_SEC_MCAST;
2225 sec_index = HTT_TXRX_SEC_UCAST;
2227 sec_type = peer->rx_pn[sec_index].sec_type;
2228 first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;
2230 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2232 if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
2233 spin_lock_bh(&ar->data_lock);
2234 pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
2235 spin_unlock_bh(&ar->data_lock);
	/* Strip off all headers before the MAC header before delivery to
	 * mac80211.
	 */
2244 tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
2245 sizeof(rx->ppdu) + sizeof(rx->prefix) +
2246 sizeof(rx->fw_desc) +
2247 sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
2249 skb_pull(skb, tot_hdr_len);
2251 hdr = (struct ieee80211_hdr *)skb->data;
2252 qos = ieee80211_is_data_qos(hdr->frame_control);
2254 rx_status = IEEE80211_SKB_RXCB(skb);
2255 memset(rx_status, 0, sizeof(*rx_status));
2257 if (rx->ppdu.combined_rssi == 0) {
2258 /* SDIO firmware does not provide signal */
2259 rx_status->signal = 0;
2260 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2262 rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
2263 rx->ppdu.combined_rssi;
2264 rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
2267 spin_lock_bh(&ar->data_lock);
2268 ch = ar->scan_channel;
2270 ch = ar->rx_channel;
2272 ch = ath10k_htt_rx_h_any_channel(ar);
2274 ch = ar->tgt_oper_chan;
2275 spin_unlock_bh(&ar->data_lock);
2278 rx_status->band = ch->band;
2279 rx_status->freq = ch->center_freq;
2281 if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
2282 rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
2284 rx_status->flag |= RX_FLAG_AMSDU_MORE;
2286 /* Not entirely sure about this, but all frames from the chipset have
2287 * the protected flag set even though they have already been decrypted.
2288 * Unmasking this flag is necessary in order for mac80211 not to drop the frame.
2290 * TODO: Verify this is always the case or find out a way to check
2291 * if there has been hw decryption.
2293 if (ieee80211_has_protected(hdr->frame_control)) {
2294 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2295 rx_status->flag |= RX_FLAG_DECRYPTED |
2296 RX_FLAG_IV_STRIPPED |
2297 RX_FLAG_MMIC_STRIPPED;
2299 if (tid < IEEE80211_NUM_TIDS &&
2301 check_pn_type == HTT_RX_PN_CHECK &&
2302 (sec_type == HTT_SECURITY_AES_CCMP ||
2303 sec_type == HTT_SECURITY_TKIP ||
2304 sec_type == HTT_SECURITY_TKIP_NOMIC)) {
2307 __le64 pn48 = cpu_to_le64(new_pn.pn48);
2309 hdr = (struct ieee80211_hdr *)skb->data;
2310 offset = ieee80211_hdrlen(hdr->frame_control);
2311 hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2312 rx_status->flag &= ~RX_FLAG_IV_STRIPPED;
2314 memmove(skb->data - IEEE80211_CCMP_HDR_LEN, skb->data, offset);
2316 skb_push(skb, IEEE80211_CCMP_HDR_LEN);
2317 ivp = skb->data + offset;
2318 memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
2320 ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;
2322 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
2323 if (peer->keys[i] &&
2324 peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
2325 keyidx = peer->keys[i]->keyidx;
2329 ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;
2331 if (sec_type == HTT_SECURITY_AES_CCMP) {
2332 rx_status->flag |= RX_FLAG_MIC_STRIPPED;
2334 memcpy(skb->data + offset, &pn48, 2);
2335 /* pn 2, pn 3, pn 4, pn 5 */
2336 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2338 rx_status->flag |= RX_FLAG_ICV_STRIPPED;
2340 memcpy(skb->data + offset + 2, &pn48, 1);
2342 memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
2343 /* TSC 2, TSC 3, TSC 4, TSC 5 */
2344 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2349 if (tkip_mic_type == HTT_RX_TKIP_MIC)
2350 rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
2351 ~RX_FLAG_MMIC_STRIPPED;
2353 if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
2354 rx_status->flag |= RX_FLAG_MMIC_ERROR;
2356 if (!qos && tid < IEEE80211_NUM_TIDS) {
2358 __le16 qos_ctrl = 0;
2360 hdr = (struct ieee80211_hdr *)skb->data;
2361 offset = ieee80211_hdrlen(hdr->frame_control);
2363 hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
2364 memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
2365 skb_push(skb, IEEE80211_QOS_CTL_LEN);
2366 qos_ctrl = cpu_to_le16(tid);
2367 memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
2371 ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
2373 ieee80211_rx_ni(ar->hw, skb);
2375 /* We have delivered the skb to the upper layers (mac80211) so we must not free it. */
2380 /* Tell the caller that it must free the skb since we have not consumed it. */
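/*
 * For reference, the 8-byte CCMP header rebuilt above is laid out as
 * PN0, PN1, reserved, key-id octet (ExtIV bit set), PN2..PN5. A
 * minimal, self-contained sketch of that layout; the helper name and
 * stdint types are ours, this is not driver code:
 */
#include <stdint.h>
#include <string.h>

static void ccmp_hdr_from_pn48(uint8_t hdr[8], uint64_t pn48, uint8_t keyidx)
{
	uint8_t pn[8];

	memcpy(pn, &pn48, sizeof(pn48));  /* assumes a little-endian host */
	hdr[0] = pn[0];                   /* PN0 */
	hdr[1] = pn[1];                   /* PN1 */
	hdr[2] = 0;                       /* reserved */
	hdr[3] = 0x20 | (keyidx << 6);    /* ExtIV | key id */
	memcpy(&hdr[4], &pn[2], 4);       /* PN2..PN5 */
}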
2386 static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
2392 orig_hdr = skb->data;
2393 ivp = orig_hdr + hdr_len + head_len;
2395 /* the ExtIV bit is always set to 1 for TKIP */
2396 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2399 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2400 skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2401 skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
2405 static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
2411 orig_hdr = skb->data;
2412 ivp = orig_hdr + hdr_len + head_len;
2414 /* the ExtIV bit is always set to 1 for TKIP */
2415 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2418 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2419 skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2420 skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
2424 static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
2430 orig_hdr = skb->data;
2431 ivp = orig_hdr + hdr_len + head_len;
2433 /* the ExtIV bit is always set to 1 for CCMP */
2434 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2437 skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
2438 memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
2439 skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
2443 static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
2449 orig_hdr = skb->data;
2451 memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
2452 orig_hdr, head_len + hdr_len);
2453 skb_pull(skb, IEEE80211_WEP_IV_LEN);
2454 skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
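/*
 * All four decap helpers above follow one recipe: slide everything in
 * front of the payload forward over the per-frame IV, drop the IV, and
 * trim the trailing ICV/MIC. A flat-buffer sketch of that recipe
 * (illustrative only, names ours; the real code operates on an sk_buff):
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t strip_iv_and_icv(uint8_t *buf, size_t len, size_t front_len,
			       size_t iv_len, size_t icv_len)
{
	if (len < front_len + iv_len + icv_len)
		return 0;  /* too short to hold headers, IV and ICV */

	/* move the leading headers forward so they overwrite the IV */
	memmove(buf + iv_len, buf, front_len);

	/* the caller treats buf + iv_len as the new frame start */
	return len - iv_len - icv_len;
}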
2458 static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
2459 struct htt_rx_fragment_indication *rx,
2460 struct sk_buff *skb)
2462 struct ath10k *ar = htt->ar;
2463 enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
2464 enum htt_txrx_sec_cast_type sec_index;
2465 struct htt_rx_indication_hl *rx_hl;
2466 enum htt_security_types sec_type;
2467 u32 tid, frag, seq, rx_desc_info;
2468 union htt_rx_pn_t new_pn = {0};
2469 struct htt_hl_rx_desc *rx_desc;
2470 u16 peer_id, sc, hdr_space;
2471 union htt_rx_pn_t *last_pn;
2472 struct ieee80211_hdr *hdr;
2473 int ret, num_mpdu_ranges;
2474 struct ath10k_peer *peer;
2475 struct htt_resp *resp;
2478 resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
2479 skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
2480 skb_trim(skb, skb->len - FCS_LEN);
2482 peer_id = __le16_to_cpu(rx->peer_id);
2483 rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);
2485 spin_lock_bh(&ar->data_lock);
2486 peer = ath10k_peer_find_by_id(ar, peer_id);
2488 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
2492 num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
2493 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2495 tot_hdr_len = sizeof(struct htt_resp_hdr) +
2496 sizeof(rx_hl->hdr) +
2497 sizeof(rx_hl->ppdu) +
2498 sizeof(rx_hl->prefix) +
2499 sizeof(rx_hl->fw_desc) +
2500 sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;
2502 tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2503 rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
2504 rx_desc_info = __le32_to_cpu(rx_desc->info);
2506 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
2507 spin_unlock_bh(&ar->data_lock);
2508 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2509 HTT_RX_NON_PN_CHECK,
2510 HTT_RX_NON_TKIP_MIC);
2513 hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
2515 if (ieee80211_has_retry(hdr->frame_control))
2518 hdr_space = ieee80211_hdrlen(hdr->frame_control);
2519 sc = __le16_to_cpu(hdr->seq_ctrl);
2520 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2521 frag = sc & IEEE80211_SCTL_FRAG;
2523 sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
2524 HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
2525 sec_type = peer->rx_pn[sec_index].sec_type;
2526 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2529 case HTT_SECURITY_TKIP:
2530 tkip_mic = HTT_RX_TKIP_MIC;
2531 ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb, tot_hdr_len + rx_hl->fw_desc.len, hdr_space);
2538 case HTT_SECURITY_TKIP_NOMIC:
2539 ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb, tot_hdr_len + rx_hl->fw_desc.len, hdr_space);
2546 case HTT_SECURITY_AES_CCMP:
2547 ret = ath10k_htt_rx_frag_ccmp_decap(skb, tot_hdr_len + rx_hl->fw_desc.len, hdr_space);
2553 case HTT_SECURITY_WEP128:
2554 case HTT_SECURITY_WEP104:
2555 case HTT_SECURITY_WEP40:
2556 ret = ath10k_htt_rx_frag_wep_decap(skb, tot_hdr_len + rx_hl->fw_desc.len, hdr_space);
2566 resp = (struct htt_resp *)(skb->data);
2568 if (sec_type != HTT_SECURITY_AES_CCMP &&
2569 sec_type != HTT_SECURITY_TKIP &&
2570 sec_type != HTT_SECURITY_TKIP_NOMIC) {
2571 spin_unlock_bh(&ar->data_lock);
2572 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2573 HTT_RX_NON_PN_CHECK,
2574 HTT_RX_NON_TKIP_MIC);
2577 last_pn = &peer->frag_tids_last_pn[tid];
2580 if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
2583 last_pn->pn48 = new_pn.pn48;
2584 peer->frag_tids_seq[tid] = seq;
2585 } else if (sec_type == HTT_SECURITY_AES_CCMP) {
2586 if (seq != peer->frag_tids_seq[tid])
2589 if (new_pn.pn48 != last_pn->pn48 + 1)
2592 last_pn->pn48 = new_pn.pn48;
2593 last_pn = &peer->tids_last_pn[tid];
2594 last_pn->pn48 = new_pn.pn48;
2597 spin_unlock_bh(&ar->data_lock);
2599 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2600 HTT_RX_NON_PN_CHECK, tkip_mic);
2603 spin_unlock_bh(&ar->data_lock);
2605 /* Tell the caller that it must free the skb since we have not consumed it. */
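/*
 * Condensed form of the CCMP defragmentation replay rule enforced
 * above: fragment 0 records its PN and sequence number, and every
 * later fragment must carry the same sequence number and a PN exactly
 * one larger than its predecessor. A sketch with invented names:
 */
#include <stdbool.h>
#include <stdint.h>

static bool ccmp_frag_pn_ok(uint64_t *last_pn48, uint16_t *last_seq,
			    uint64_t pn48, uint16_t seq, bool first_frag)
{
	if (first_frag) {
		*last_pn48 = pn48;
		*last_seq = seq;
		return true;
	}

	if (seq != *last_seq || pn48 != *last_pn48 + 1)
		return false;  /* replayed or reordered fragment */

	*last_pn48 = pn48;
	return true;
}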
2611 static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
2612 struct htt_rx_indication *rx)
2614 struct ath10k *ar = htt->ar;
2615 struct htt_rx_indication_mpdu_range *mpdu_ranges;
2616 int num_mpdu_ranges;
2617 int i, mpdu_count = 0;
2621 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2622 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2623 peer_id = __le16_to_cpu(rx->hdr.peer_id);
2624 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2626 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
2628 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
2629 rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));
2631 for (i = 0; i < num_mpdu_ranges; i++)
2632 mpdu_count += mpdu_ranges[i].mpdu_count;
2634 atomic_add(mpdu_count, &htt->num_mpdus_ready);
2636 ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
2640 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
2641 struct sk_buff *skb)
2643 struct ath10k_htt *htt = &ar->htt;
2644 struct htt_resp *resp = (struct htt_resp *)skb->data;
2645 struct htt_tx_done tx_done = {};
2646 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
2647 __le16 msdu_id, *msdus;
2648 bool rssi_enabled = false;
2649 u8 msdu_count = 0, num_airtime_records, tid;
2651 struct htt_data_tx_compl_ppdu_dur *ppdu_info;
2652 struct ath10k_peer *peer;
2653 u16 ppdu_info_offset = 0, peer_id;
2657 case HTT_DATA_TX_STATUS_NO_ACK:
2658 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2660 case HTT_DATA_TX_STATUS_OK:
2661 tx_done.status = HTT_TX_COMPL_STATE_ACK;
2663 case HTT_DATA_TX_STATUS_DISCARD:
2664 case HTT_DATA_TX_STATUS_POSTPONE:
2665 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
2666 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2669 ath10k_warn(ar, "unhandled tx completion status %d\n", status);
2670 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2674 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
2675 resp->data_tx_completion.num_msdus);
2677 msdu_count = resp->data_tx_completion.num_msdus;
2678 msdus = resp->data_tx_completion.msdus;
2679 rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);
2682 htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params, resp);
2685 for (i = 0; i < msdu_count; i++) {
2686 msdu_id = msdus[i];
2687 tx_done.msdu_id = __le16_to_cpu(msdu_id);
2690 /* The total number of MSDUs should be even;
2691 * if an odd count is sent, the firmware fills the
2692 * last msdu id with 0xffff.
2694 if (msdu_count & 0x01) {
2695 msdu_id = msdus[msdu_count + i + 1 + htt_pad];
2696 tx_done.ack_rssi = __le16_to_cpu(msdu_id);
2698 msdu_id = msdus[msdu_count + i + htt_pad];
2699 tx_done.ack_rssi = __le16_to_cpu(msdu_id);
2703 /* kfifo_put: In practice firmware shouldn't fire off per-CE
2704 * interrupt and main interrupt (MSI/-X range case) for the same
2705 * HTC service so it should be safe to use kfifo_put w/o lock.
2707 * From kfifo_put() documentation:
2708 * Note that with only one concurrent reader and one concurrent
2709 * writer, you don't need extra locking to use these macros.
2711 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
2712 ath10k_txrx_tx_unref(htt, &tx_done);
2713 } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
2714 ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
2715 tx_done.msdu_id, tx_done.status);
2716 ath10k_txrx_tx_unref(htt, &tx_done);
2720 if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
2723 ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;
2726 ppdu_info_offset += ppdu_info_offset;
2728 if (resp->data_tx_completion.flags2 &
2729 (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
2730 ppdu_info_offset += 2;
2732 ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
2733 num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
2734 __le32_to_cpu(ppdu_info->info0));
2736 for (i = 0; i < num_airtime_records; i++) {
2737 struct htt_data_tx_ppdu_dur *ppdu_dur;
2740 ppdu_dur = &ppdu_info->ppdu_dur[i];
2741 info0 = __le32_to_cpu(ppdu_dur->info0);
2743 peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK, info0);
2746 spin_lock_bh(&ar->data_lock);
2748 peer = ath10k_peer_find_by_id(ar, peer_id);
2749 if (!peer || !peer->sta) {
2750 spin_unlock_bh(&ar->data_lock);
2755 tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
2756 IEEE80211_QOS_CTL_TID_MASK;
2757 tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
2759 ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
2761 spin_unlock_bh(&ar->data_lock);
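/*
 * Sketch of the completion payload indexing decoded above: the
 * firmware sends msdu_count MSDU ids, pads the array to an even count
 * with a 0xffff entry, and (when RSSI reporting is on) follows it with
 * one 16-bit ack RSSI word per MSDU. The hardware-specific htt_pad
 * offset is omitted here for brevity; the helper name is ours:
 */
#include <stdint.h>

static uint16_t tx_compl_ack_rssi(const uint16_t *msdus, int msdu_count, int i)
{
	/* the RSSI words start after the (evened-up) id array */
	int rssi_base = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;

	return msdus[rssi_base + i];
}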
2766 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
2768 struct htt_rx_addba *ev = &resp->rx_addba;
2769 struct ath10k_peer *peer;
2770 struct ath10k_vif *arvif;
2771 u16 info0, tid, peer_id;
2773 info0 = __le16_to_cpu(ev->info0);
2774 tid = MS(info0, HTT_RX_BA_INFO0_TID);
2775 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
2777 ath10k_dbg(ar, ATH10K_DBG_HTT,
2778 "htt rx addba tid %hu peer_id %hu size %hhu\n",
2779 tid, peer_id, ev->window_size);
2781 spin_lock_bh(&ar->data_lock);
2782 peer = ath10k_peer_find_by_id(ar, peer_id);
2784 ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
2786 spin_unlock_bh(&ar->data_lock);
2790 arvif = ath10k_get_arvif(ar, peer->vdev_id);
2792 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
2794 spin_unlock_bh(&ar->data_lock);
2798 ath10k_dbg(ar, ATH10K_DBG_HTT,
2799 "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
2800 peer->addr, tid, ev->window_size);
2802 ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
2803 spin_unlock_bh(&ar->data_lock);
2806 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
2808 struct htt_rx_delba *ev = &resp->rx_delba;
2809 struct ath10k_peer *peer;
2810 struct ath10k_vif *arvif;
2811 u16 info0, tid, peer_id;
2813 info0 = __le16_to_cpu(ev->info0);
2814 tid = MS(info0, HTT_RX_BA_INFO0_TID);
2815 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
2817 ath10k_dbg(ar, ATH10K_DBG_HTT,
2818 "htt rx delba tid %hu peer_id %hu\n",
2821 spin_lock_bh(&ar->data_lock);
2822 peer = ath10k_peer_find_by_id(ar, peer_id);
2824 ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
2826 spin_unlock_bh(&ar->data_lock);
2830 arvif = ath10k_get_arvif(ar, peer->vdev_id);
2832 ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
2834 spin_unlock_bh(&ar->data_lock);
2838 ath10k_dbg(ar, ATH10K_DBG_HTT,
2839 "htt rx stop rx ba session sta %pM tid %hu\n",
2842 ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
2843 spin_unlock_bh(&ar->data_lock);
2846 static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
2847 struct sk_buff_head *amsdu)
2849 struct sk_buff *msdu;
2850 struct htt_rx_desc *rxd;
2852 if (skb_queue_empty(list))
2855 if (WARN_ON(!skb_queue_empty(amsdu)))
2858 while ((msdu = __skb_dequeue(list))) {
2859 __skb_queue_tail(amsdu, msdu);
2861 rxd = (void *)msdu->data - sizeof(*rxd);
2862 if (rxd->msdu_end.common.info0 &
2863 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
2867 msdu = skb_peek_tail(amsdu);
2868 rxd = (void *)msdu->data - sizeof(*rxd);
2869 if (!(rxd->msdu_end.common.info0 &
2870 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
2871 skb_queue_splice_init(amsdu, list);
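/*
 * The extraction contract above, in short: MSDUs are dequeued from
 * `list` into `amsdu` until one carries the LAST_MSDU bit in its rx
 * descriptor. If the list runs dry before that bit is seen, the
 * partial A-MSDU is spliced back so the caller can retry after more
 * MSDUs arrive.
 */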
2878 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
2879 struct sk_buff *skb)
2881 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2883 if (!ieee80211_has_protected(hdr->frame_control))
2886 /* Offloaded frames are already decrypted but firmware insists they are
2887 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
2888 * will drop the frame.
2891 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2892 status->flag |= RX_FLAG_DECRYPTED |
2893 RX_FLAG_IV_STRIPPED |
2894 RX_FLAG_MMIC_STRIPPED;
2897 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
2898 struct sk_buff_head *list)
2900 struct ath10k_htt *htt = &ar->htt;
2901 struct ieee80211_rx_status *status = &htt->rx_status;
2902 struct htt_rx_offload_msdu *rx;
2903 struct sk_buff *msdu;
2906 while ((msdu = __skb_dequeue(list))) {
2907 /* Offloaded frames don't have an Rx descriptor. Instead they have
2908 * a short meta information header.
2911 rx = (void *)msdu->data;
2913 skb_put(msdu, sizeof(*rx));
2914 skb_pull(msdu, sizeof(*rx));
2916 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
2917 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
2918 dev_kfree_skb_any(msdu);
2922 skb_put(msdu, __le16_to_cpu(rx->msdu_len));
2924 /* Offloaded rx header length isn't a multiple of 2 or 4 so the
2925 * actual payload is unaligned. Align the frame. Otherwise
2926 * mac80211 complains. This shouldn't reduce performance much
2927 * because these offloaded frames are rare.
2929 offset = 4 - ((unsigned long)msdu->data & 3);
2930 skb_put(msdu, offset);
2931 memmove(msdu->data + offset, msdu->data, msdu->len);
2932 skb_pull(msdu, offset);
2934 /* FIXME: The frame is NWifi. Re-construct QoS Control
2935 * if possible later.
2938 memset(status, 0, sizeof(*status));
2939 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2941 ath10k_htt_rx_h_rx_offload_prot(status, msdu);
2942 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
2943 ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
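/*
 * Note on the realignment above: `4 - (addr & 3)` yields 1..4, so even
 * an already-aligned payload is shifted by a full word; the skb_put()
 * grows the buffer so the shifted copy stays in bounds, and skb_pull()
 * then moves the data pointer onto the aligned start. The copy cost is
 * tolerated because offloaded frames are rare.
 */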
2947 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
2949 struct ath10k_htt *htt = &ar->htt;
2950 struct htt_resp *resp = (void *)skb->data;
2951 struct ieee80211_rx_status *status = &htt->rx_status;
2952 struct sk_buff_head list;
2953 struct sk_buff_head amsdu;
2962 lockdep_assert_held(&htt->rx_ring.lock);
2964 if (htt->rx_confused)
2967 skb_pull(skb, sizeof(resp->hdr));
2968 skb_pull(skb, sizeof(resp->rx_in_ord_ind));
2970 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
2971 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
2972 vdev_id = resp->rx_in_ord_ind.vdev_id;
2973 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
2974 offload = !!(resp->rx_in_ord_ind.info &
2975 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
2976 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
2978 ath10k_dbg(ar, ATH10K_DBG_HTT,
2979 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
2980 vdev_id, peer_id, tid, offload, frag, msdu_count);
2982 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
2983 ath10k_warn(ar, "dropping invalid in order rx indication\n");
2987 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
2988 * extracted and processed.
2990 __skb_queue_head_init(&list);
2991 if (ar->hw_params.target_64bit)
2992 ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind, &list);
2994 else
2995 ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind, &list);
2999 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
3000 htt->rx_confused = true;
3004 /* Offloaded frames are very different and need to be handled separately. */
3008 ath10k_htt_rx_h_rx_offload(ar, &list);
3010 while (!skb_queue_empty(&list)) {
3011 __skb_queue_head_init(&amsdu);
3012 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
3015 /* Note: The in-order indication may report interleaved
3016 * frames from different PPDUs meaning reported rx rate
3017 * to mac80211 isn't accurate/reliable. It's still
3018 * better to report something than nothing though. This
3019 * should still give an idea about rx rate to the user.
3021 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
3022 ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
3023 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
3025 ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
3030 /* Should not happen. */
3031 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
3032 htt->rx_confused = true;
3033 __skb_queue_purge(&list);
3040 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
3041 const __le32 *resp_ids,
3047 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
3050 for (i = 0; i < num_resp_ids; i++) {
3051 resp_id = le32_to_cpu(resp_ids[i]);
3053 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
3056 /* TODO: free resp_id */
3060 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
3062 struct ieee80211_hw *hw = ar->hw;
3063 struct ieee80211_txq *txq;
3064 struct htt_resp *resp = (struct htt_resp *)skb->data;
3065 struct htt_tx_fetch_record *record;
3067 size_t max_num_bytes;
3068 size_t max_num_msdus;
3071 const __le32 *resp_ids;
3080 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
3082 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
3083 if (unlikely(skb->len < len)) {
3084 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
3088 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
3089 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
3091 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
3092 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
3094 if (unlikely(skb->len < len)) {
3095 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
3099 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
3100 num_records, num_resp_ids,
3101 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
3103 if (!ar->htt.tx_q_state.enabled) {
3104 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
3108 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
3109 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
3115 for (i = 0; i < num_records; i++) {
3116 record = &resp->tx_fetch_ind.records[i];
3117 peer_id = MS(le16_to_cpu(record->info),
3118 HTT_TX_FETCH_RECORD_INFO_PEER_ID);
3119 tid = MS(le16_to_cpu(record->info),
3120 HTT_TX_FETCH_RECORD_INFO_TID);
3121 max_num_msdus = le16_to_cpu(record->num_msdus);
3122 max_num_bytes = le32_to_cpu(record->num_bytes);
3124 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
3125 i, peer_id, tid, max_num_msdus, max_num_bytes);
3127 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3128 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3129 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
3134 spin_lock_bh(&ar->data_lock);
3135 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3136 spin_unlock_bh(&ar->data_lock);
3138 /* It is okay to release the lock and use txq because RCU read
3142 if (unlikely(!txq)) {
3143 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
3151 ieee80211_txq_schedule_start(hw, txq->ac);
3152 may_tx = ieee80211_txq_may_transmit(hw, txq);
3153 while (num_msdus < max_num_msdus &&
3154 num_bytes < max_num_bytes) {
3158 ret = ath10k_mac_tx_push_txq(hw, txq);
3165 ieee80211_return_txq(hw, txq, false);
3166 ieee80211_txq_schedule_end(hw, txq->ac);
3168 record->num_msdus = cpu_to_le16(num_msdus);
3169 record->num_bytes = cpu_to_le32(num_bytes);
3171 ath10k_htt_tx_txq_recalc(hw, txq);
3176 resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
3177 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
3179 ret = ath10k_htt_tx_fetch_resp(ar,
3180 resp->tx_fetch_ind.token,
3181 resp->tx_fetch_ind.fetch_seq_num,
3182 resp->tx_fetch_ind.records, num_records);
3184 if (unlikely(ret)) {
3185 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
3186 le32_to_cpu(resp->tx_fetch_ind.token), ret);
3187 /* FIXME: request fw restart */
3190 ath10k_htt_tx_txq_sync(ar);
3193 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
3194 struct sk_buff *skb)
3196 const struct htt_resp *resp = (void *)skb->data;
3200 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
3202 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
3203 if (unlikely(skb->len < len)) {
3204 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
3208 num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
3209 len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
3211 if (unlikely(skb->len < len)) {
3212 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
3216 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
3217 resp->tx_fetch_confirm.resp_ids,
3221 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
3222 struct sk_buff *skb)
3224 const struct htt_resp *resp = (void *)skb->data;
3225 const struct htt_tx_mode_switch_record *record;
3226 struct ieee80211_txq *txq;
3227 struct ath10k_txq *artxq;
3230 enum htt_tx_mode_switch_mode mode;
3239 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
3241 len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
3242 if (unlikely(skb->len < len)) {
3243 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
3247 info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
3248 info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
3250 enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
3251 num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
3252 mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
3253 threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
3255 ath10k_dbg(ar, ATH10K_DBG_HTT,
3256 "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
3257 info0, info1, enable, num_records, mode, threshold);
3259 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
3261 if (unlikely(skb->len < len)) {
3262 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: too many records\n");
3267 case HTT_TX_MODE_SWITCH_PUSH:
3268 case HTT_TX_MODE_SWITCH_PUSH_PULL:
3271 ath10k_warn(ar, "received invalid tx_mode_switch_ind mode %d, ignoring\n",
3279 ar->htt.tx_q_state.enabled = enable;
3280 ar->htt.tx_q_state.mode = mode;
3281 ar->htt.tx_q_state.num_push_allowed = threshold;
3285 for (i = 0; i < num_records; i++) {
3286 record = &resp->tx_mode_switch_ind.records[i];
3287 info0 = le16_to_cpu(record->info0);
3288 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
3289 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
3291 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3292 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3293 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
3298 spin_lock_bh(&ar->data_lock);
3299 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3300 spin_unlock_bh(&ar->data_lock);
3302 /* It is okay to release the lock and use txq because RCU read
3306 if (unlikely(!txq)) {
3307 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
3312 spin_lock_bh(&ar->htt.tx_lock);
3313 artxq = (void *)txq->drv_priv;
3314 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
3315 spin_unlock_bh(&ar->htt.tx_lock);
3320 ath10k_mac_tx_push_pending(ar);
3323 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
3327 release = ath10k_htt_t2h_msg_handler(ar, skb);
3329 /* Free the indication buffer */
3331 dev_kfree_skb_any(skb);
3334 static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
3336 static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
3337 18, 24, 36, 48, 54};
3340 for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
3341 if (rate == legacy_rates[i])
3345 ath10k_warn(ar, "Invalid legacy rate %hhd peer stats\n", rate);
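/*
 * Usage note: the table above is indexed by the over-the-air rate in
 * Mbps, CCK first (1, 2, 5.5, 11) and then OFDM (6..54); a reported
 * rate of 11 maps to index 3. 5.5 Mbps is matched as 5, which is why
 * the caller rewrites the firmware's CCK "6" before the lookup.
 */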
3350 ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
3351 struct ath10k_sta *arsta,
3352 struct ath10k_per_peer_tx_stats *pstats,
3355 struct rate_info *txrate = &arsta->txrate;
3356 struct ath10k_htt_tx_stats *tx_stats;
3357 int idx, ht_idx, gi, mcs, bw, nss;
3358 unsigned long flags;
3360 if (!arsta->tx_stats)
3363 tx_stats = arsta->tx_stats;
3364 flags = txrate->flags;
3365 gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
3366 mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
3369 ht_idx = mcs + (nss - 1) * 8;
3370 idx = mcs * 8 + 8 * 10 * (nss - 1);
3373 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
3375 if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
3376 STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
3377 STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
3378 STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
3379 STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
3380 STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
3381 STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
3382 } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
3383 STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
3384 STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
3385 STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
3386 STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
3387 STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
3388 STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
3390 mcs = legacy_rate_idx;
3392 STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
3393 STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
3394 STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
3395 STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
3396 STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
3397 STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
3400 if (ATH10K_HW_AMPDU(pstats->flags)) {
3401 tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
3403 if (txrate->flags & RATE_INFO_FLAGS_MCS) {
3404 STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
3405 pstats->succ_bytes + pstats->retry_bytes;
3406 STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
3407 pstats->succ_pkts + pstats->retry_pkts;
3409 STATS_OP_FMT(AMPDU).vht[0][mcs] +=
3410 pstats->succ_bytes + pstats->retry_bytes;
3411 STATS_OP_FMT(AMPDU).vht[1][mcs] +=
3412 pstats->succ_pkts + pstats->retry_pkts;
3414 STATS_OP_FMT(AMPDU).bw[0][bw] +=
3415 pstats->succ_bytes + pstats->retry_bytes;
3416 STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
3417 pstats->succ_bytes + pstats->retry_bytes;
3418 STATS_OP_FMT(AMPDU).gi[0][gi] +=
3419 pstats->succ_bytes + pstats->retry_bytes;
3420 STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
3421 pstats->succ_bytes + pstats->retry_bytes;
3422 STATS_OP_FMT(AMPDU).bw[1][bw] +=
3423 pstats->succ_pkts + pstats->retry_pkts;
3424 STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
3425 pstats->succ_pkts + pstats->retry_pkts;
3426 STATS_OP_FMT(AMPDU).gi[1][gi] +=
3427 pstats->succ_pkts + pstats->retry_pkts;
3428 STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
3429 pstats->succ_pkts + pstats->retry_pkts;
3431 tx_stats->ack_fails +=
3432 ATH10K_HW_BA_FAIL(pstats->flags);
3435 STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
3436 STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
3437 STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
3439 STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
3440 STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
3441 STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
3443 STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
3444 STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
3445 STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
3447 STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
3448 STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
3449 STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
3451 STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
3452 STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
3453 STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
3455 STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
3456 STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
3457 STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
3459 if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
3460 STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
3461 STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
3462 STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
3463 STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
3464 STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
3465 STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
3468 tx_stats->tx_duration += pstats->duration;
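/*
 * The rate_table above packs (nss, mcs, bw, gi) into one flat index:
 * each mcs value owns an 8-entry block and each spatial stream owns 10
 * such blocks. A sketch of the computation, assuming bandwidth and
 * guard interval are folded in as `bw * 2 + gi` (that step is not
 * shown above, so treat it as our assumption):
 */
static int rate_table_index(int mcs, int nss, int bw, int gi)
{
	return mcs * 8 + 8 * 10 * (nss - 1) + bw * 2 + gi;  /* assumed */
}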
3472 ath10k_update_per_peer_tx_stats(struct ath10k *ar,
3473 struct ieee80211_sta *sta,
3474 struct ath10k_per_peer_tx_stats *peer_stats)
3476 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
3477 struct ieee80211_chanctx_conf *conf = NULL;
3480 bool skip_auto_rate;
3481 struct rate_info txrate;
3483 lockdep_assert_held(&ar->data_lock);
3485 txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
3486 txrate.bw = ATH10K_HW_BW(peer_stats->flags);
3487 txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
3488 txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
3489 sgi = ATH10K_HW_GI(peer_stats->flags);
3490 skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
3492 /* Firmware's rate control skips broadcast/management frames,
3493 * frames for which the host has configured fixed rates, and some other special cases.
3498 if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
3499 ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats\n", txrate.mcs);
3503 if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
3504 (txrate.mcs > 7 || txrate.nss < 1)) {
3505 ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats\n",
3506 txrate.mcs, txrate.nss);
3510 memset(&arsta->txrate, 0, sizeof(arsta->txrate));
3511 memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
3512 if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
3513 txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
3514 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
3515 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
3516 if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
3518 rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
3521 arsta->txrate.legacy = rate;
3522 } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
3523 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
3524 arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
3526 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
3527 arsta->txrate.mcs = txrate.mcs;
3530 switch (txrate.flags) {
3531 case WMI_RATE_PREAMBLE_OFDM:
3532 if (arsta->arvif && arsta->arvif->vif)
3533 conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
3534 if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
3535 arsta->tx_info.status.rates[0].idx = rate_idx - 4;
3537 case WMI_RATE_PREAMBLE_CCK:
3538 arsta->tx_info.status.rates[0].idx = rate_idx;
3540 arsta->tx_info.status.rates[0].flags |=
3541 (IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
3542 IEEE80211_TX_RC_SHORT_GI);
3544 case WMI_RATE_PREAMBLE_HT:
3545 arsta->tx_info.status.rates[0].idx =
3546 txrate.mcs + ((txrate.nss - 1) * 8);
3548 arsta->tx_info.status.rates[0].flags |=
3549 IEEE80211_TX_RC_SHORT_GI;
3550 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
3552 case WMI_RATE_PREAMBLE_VHT:
3553 ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
3554 txrate.mcs, txrate.nss);
3556 arsta->tx_info.status.rates[0].flags |=
3557 IEEE80211_TX_RC_SHORT_GI;
3558 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
3562 arsta->txrate.nss = txrate.nss;
3563 arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
3564 arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
3566 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
3568 switch (arsta->txrate.bw) {
3569 case RATE_INFO_BW_40:
3570 arsta->tx_info.status.rates[0].flags |=
3571 IEEE80211_TX_RC_40_MHZ_WIDTH;
3573 case RATE_INFO_BW_80:
3574 arsta->tx_info.status.rates[0].flags |=
3575 IEEE80211_TX_RC_80_MHZ_WIDTH;
3579 if (peer_stats->succ_pkts) {
3580 arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
3581 arsta->tx_info.status.rates[0].count = 1;
3582 ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
3585 if (ar->htt.disable_tx_comp) {
3586 arsta->tx_failed += peer_stats->failed_pkts;
3587 ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
3591 arsta->tx_retries += peer_stats->retry_pkts;
3592 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d\n", arsta->tx_retries);
3594 if (ath10k_debug_is_extd_tx_stats_enabled(ar))
3595 ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
3599 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
3600 struct sk_buff *skb)
3602 struct htt_resp *resp = (struct htt_resp *)skb->data;
3603 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
3604 struct htt_per_peer_tx_stats_ind *tx_stats;
3605 struct ieee80211_sta *sta;
3606 struct ath10k_peer *peer;
3608 u8 ppdu_len, num_ppdu;
3610 num_ppdu = resp->peer_tx_stats.num_ppdu;
3611 ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
3613 if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
3614 ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
3618 tx_stats = (struct htt_per_peer_tx_stats_ind *)
3619 (resp->peer_tx_stats.payload);
3620 peer_id = __le16_to_cpu(tx_stats->peer_id);
3623 spin_lock_bh(&ar->data_lock);
3624 peer = ath10k_peer_find_by_id(ar, peer_id);
3625 if (!peer || !peer->sta) {
3626 ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
3632 for (i = 0; i < num_ppdu; i++) {
3633 tx_stats = (struct htt_per_peer_tx_stats_ind *)
3634 (resp->peer_tx_stats.payload + i * ppdu_len);
3636 p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
3637 p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
3638 p_tx_stats->failed_bytes =
3639 __le32_to_cpu(tx_stats->failed_bytes);
3640 p_tx_stats->ratecode = tx_stats->ratecode;
3641 p_tx_stats->flags = tx_stats->flags;
3642 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
3643 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
3644 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
3645 p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);
3647 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
3651 spin_unlock_bh(&ar->data_lock);
3655 static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
3657 struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
3658 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
3659 struct ath10k_10_2_peer_tx_stats *tx_stats;
3660 struct ieee80211_sta *sta;
3661 struct ath10k_peer *peer;
3662 u16 log_type = __le16_to_cpu(hdr->log_type);
3665 if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
3668 tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
3669 ATH10K_10_2_TX_STATS_OFFSET);
3671 if (!tx_stats->tx_ppdu_cnt)
3674 peer_id = tx_stats->peer_id;
3677 spin_lock_bh(&ar->data_lock);
3678 peer = ath10k_peer_find_by_id(ar, peer_id);
3679 if (!peer || !peer->sta) {
3680 ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
3686 for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
3687 p_tx_stats->succ_bytes =
3688 __le16_to_cpu(tx_stats->success_bytes[i]);
3689 p_tx_stats->retry_bytes =
3690 __le16_to_cpu(tx_stats->retry_bytes[i]);
3691 p_tx_stats->failed_bytes =
3692 __le16_to_cpu(tx_stats->failed_bytes[i]);
3693 p_tx_stats->ratecode = tx_stats->ratecode[i];
3694 p_tx_stats->flags = tx_stats->flags[i];
3695 p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
3696 p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
3697 p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
3699 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
3701 spin_unlock_bh(&ar->data_lock);
3707 spin_unlock_bh(&ar->data_lock);
3711 static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
3714 case HTT_SECURITY_TKIP:
3715 case HTT_SECURITY_TKIP_NOMIC:
3716 case HTT_SECURITY_AES_CCMP:
3723 static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
3724 struct htt_security_indication *ev)
3726 enum htt_txrx_sec_cast_type sec_index;
3727 enum htt_security_types sec_type;
3728 struct ath10k_peer *peer;
3730 spin_lock_bh(&ar->data_lock);
3732 peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
3734 ath10k_warn(ar, "failed to find peer id %d for security indication\n",
3735 __le16_to_cpu(ev->peer_id));
3739 sec_type = MS(ev->flags, HTT_SECURITY_TYPE);
3741 if (ev->flags & HTT_SECURITY_IS_UNICAST)
3742 sec_index = HTT_TXRX_SEC_UCAST;
3744 sec_index = HTT_TXRX_SEC_MCAST;
3746 peer->rx_pn[sec_index].sec_type = sec_type;
3747 peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);
3749 memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
3750 memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));
3753 spin_unlock_bh(&ar->data_lock);
3756 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
3758 struct ath10k_htt *htt = &ar->htt;
3759 struct htt_resp *resp = (struct htt_resp *)skb->data;
3760 enum htt_t2h_msg_type type;
3762 /* confirm alignment */
3763 if (!IS_ALIGNED((unsigned long)skb->data, 4))
3764 ath10k_warn(ar, "unaligned htt message, expect trouble\n");
3766 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
3767 resp->hdr.msg_type);
3769 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
3770 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
3771 resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
3774 type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
3777 case HTT_T2H_MSG_TYPE_VERSION_CONF: {
3778 htt->target_version_major = resp->ver_resp.major;
3779 htt->target_version_minor = resp->ver_resp.minor;
3780 complete(&htt->target_version_received);
3783 case HTT_T2H_MSG_TYPE_RX_IND:
3784 if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
3785 ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
3787 skb_queue_tail(&htt->rx_indication_head, skb);
3791 case HTT_T2H_MSG_TYPE_PEER_MAP: {
3792 struct htt_peer_map_event ev = {
3793 .vdev_id = resp->peer_map.vdev_id,
3794 .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
3796 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
3797 ath10k_peer_map_event(htt, &ev);
3800 case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
3801 struct htt_peer_unmap_event ev = {
3802 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
3804 ath10k_peer_unmap_event(htt, &ev);
3807 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
3808 struct htt_tx_done tx_done = {};
3809 struct ath10k_htt *htt = &ar->htt;
3810 struct ath10k_htc *htc = &ar->htc;
3811 struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
3812 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
3813 int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
3815 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
3818 case HTT_MGMT_TX_STATUS_OK:
3819 tx_done.status = HTT_TX_COMPL_STATE_ACK;
3820 if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map) &&
3822 (resp->mgmt_tx_completion.flags &
3823 HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
3825 tx_done.ack_rssi = FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK, info);
3829 case HTT_MGMT_TX_STATUS_RETRY:
3830 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
3832 case HTT_MGMT_TX_STATUS_DROP:
3833 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
3837 if (htt->disable_tx_comp) {
3838 spin_lock_bh(&htc->tx_lock);
3840 spin_unlock_bh(&htc->tx_lock);
3843 status = ath10k_txrx_tx_unref(htt, &tx_done);
3845 spin_lock_bh(&htt->tx_lock);
3846 ath10k_htt_tx_mgmt_dec_pending(htt);
3847 spin_unlock_bh(&htt->tx_lock);
3851 case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
3852 ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
3854 case HTT_T2H_MSG_TYPE_SEC_IND: {
3855 struct ath10k *ar = htt->ar;
3856 struct htt_security_indication *ev = &resp->security_indication;
3858 ath10k_htt_rx_sec_ind_handler(ar, ev);
3859 ath10k_dbg(ar, ATH10K_DBG_HTT,
3860 "sec ind peer_id %d unicast %d type %d\n",
3861 __le16_to_cpu(ev->peer_id),
3862 !!(ev->flags & HTT_SECURITY_IS_UNICAST),
3863 MS(ev->flags, HTT_SECURITY_TYPE));
3864 complete(&ar->install_key_done);
3867 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
3868 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
3869 skb->data, skb->len);
3870 atomic_inc(&htt->num_mpdus_ready);
3872 return ath10k_htt_rx_proc_rx_frag_ind(htt, &resp->rx_frag_ind, skb);
3877 case HTT_T2H_MSG_TYPE_TEST:
3879 case HTT_T2H_MSG_TYPE_STATS_CONF:
3880 trace_ath10k_htt_stats(ar, skb->data, skb->len);
3882 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
3883 /* Firmware can return tx frames if it's unable to fully
3884 * process them and suspects host may be able to fix it. ath10k
3885 * sends all tx frames as already inspected so this shouldn't
3886 * happen unless fw has a bug.
3888 ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
3890 case HTT_T2H_MSG_TYPE_RX_ADDBA:
3891 ath10k_htt_rx_addba(ar, resp);
3893 case HTT_T2H_MSG_TYPE_RX_DELBA:
3894 ath10k_htt_rx_delba(ar, resp);
3896 case HTT_T2H_MSG_TYPE_PKTLOG: {
3897 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
3899 skb->len - offsetof(struct htt_resp, pktlog_msg.payload));
3902 if (ath10k_peer_stats_enabled(ar))
3903 ath10k_fetch_10_2_tx_stats(ar,
3904 resp->pktlog_msg.payload);
3907 case HTT_T2H_MSG_TYPE_RX_FLUSH: {
3908 /* Ignore this event because mac80211 takes care of Rx
3909 * aggregation reordering.
3913 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
3914 skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
3917 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
3918 struct ath10k_htt *htt = &ar->htt;
3919 struct ath10k_htc *htc = &ar->htc;
3920 struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
3921 u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
3922 int htt_credit_delta;
3924 htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
3925 if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
3926 htt_credit_delta = -htt_credit_delta;
3928 ath10k_dbg(ar, ATH10K_DBG_HTT,
3929 "htt credit update delta %d\n",
3932 if (htt->disable_tx_comp) {
3933 spin_lock_bh(&htc->tx_lock);
3934 ep->tx_credits += htt_credit_delta;
3935 spin_unlock_bh(&htc->tx_lock);
3936 ath10k_dbg(ar, ATH10K_DBG_HTT,
3937 "htt credit total %d\n",
3939 ep->ep_ops.ep_tx_credits(htc->ar);
3943 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
3944 u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
3945 u32 freq = __le32_to_cpu(resp->chan_change.freq);
3947 ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
3948 ath10k_dbg(ar, ATH10K_DBG_HTT,
3949 "htt chan change freq %u phymode %s\n",
3950 freq, ath10k_wmi_phymode_str(phymode));
3953 case HTT_T2H_MSG_TYPE_AGGR_CONF:
3955 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
3956 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
3958 if (!tx_fetch_ind) {
3959 ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
3962 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
3965 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
3966 ath10k_htt_rx_tx_fetch_confirm(ar, skb);
3968 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
3969 ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
3971 case HTT_T2H_MSG_TYPE_PEER_STATS:
3972 ath10k_htt_fetch_peer_stats(ar, skb);
3974 case HTT_T2H_MSG_TYPE_EN_STATS:
3976 ath10k_warn(ar, "htt event (%d) not handled\n",
3977 resp->hdr.msg_type);
3978 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
3979 skb->data, skb->len);
3984 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
3986 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
3987 struct sk_buff *skb)
3989 trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
3990 dev_kfree_skb_any(skb);
3992 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
3994 static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
3996 struct sk_buff *skb;
3998 while (quota < budget) {
3999 if (skb_queue_empty(&ar->htt.rx_msdus_q))
4002 skb = skb_dequeue(&ar->htt.rx_msdus_q);
4005 ath10k_process_rx(ar, skb);
4012 int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
4014 struct htt_resp *resp;
4015 struct ath10k_htt *htt = &ar->htt;
4016 struct sk_buff *skb;
4020 for (quota = 0; quota < budget; quota++) {
4021 skb = skb_dequeue(&htt->rx_indication_head);
4025 resp = (struct htt_resp *)skb->data;
4027 release = ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
4031 HTT_RX_PN_CHECK, HTT_RX_NON_TKIP_MIC);
4034 dev_kfree_skb_any(skb);
4036 ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
4037 skb_queue_len(&htt->rx_indication_head));
4041 EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
4043 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
4045 struct ath10k_htt *htt = &ar->htt;
4046 struct htt_tx_done tx_done = {};
4047 struct sk_buff_head tx_ind_q;
4048 struct sk_buff *skb;
4049 unsigned long flags;
4050 int quota = 0, done, ret;
4051 bool resched_napi = false;
4053 __skb_queue_head_init(&tx_ind_q);
4055 /* Process pending frames before dequeuing more data from hardware. */
4058 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
4059 if (quota == budget) {
4060 resched_napi = true;
4064 while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
4065 spin_lock_bh(&htt->rx_ring.lock);
4066 ret = ath10k_htt_rx_in_ord_ind(ar, skb);
4067 spin_unlock_bh(&htt->rx_ring.lock);
4069 dev_kfree_skb_any(skb);
4071 resched_napi = true;
4076 while (atomic_read(&htt->num_mpdus_ready)) {
4077 ret = ath10k_htt_rx_handle_amsdu(htt);
4079 resched_napi = true;
4082 atomic_dec(&htt->num_mpdus_ready);
4085 /* Deliver received data after processing data from hardware */
4086 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
4088 /* From NAPI documentation:
4089 * The napi poll() function may also process TX completions, in which
4090 * case if it processes the entire TX ring then it should count that
4091 * work as the rest of the budget.
4093 if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
4096 /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
4097 * From kfifo_get() documentation:
4098 * Note that with only one concurrent reader and one concurrent writer,
4099 * you don't need extra locking to use these macros.
4101 while (kfifo_get(&htt->txdone_fifo, &tx_done))
4102 ath10k_txrx_tx_unref(htt, &tx_done);
4104 ath10k_mac_tx_push_pending(ar);
4106 spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
4107 skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
4108 spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
4110 while ((skb = __skb_dequeue(&tx_ind_q))) {
4111 ath10k_htt_rx_tx_fetch_ind(ar, skb);
4112 dev_kfree_skb_any(skb);
4116 ath10k_htt_rx_msdu_buff_replenish(htt);
4117 /* In case of rx failure or more data to read, report budget
4118 * to reschedule NAPI poll
4120 done = resched_napi ? budget : quota;
4124 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
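/*
 * Budget accounting summary for the poll function above: `quota`
 * counts delivered rx MSDUs; returning the full `budget` asks NAPI to
 * reschedule (rx failure or work left over), while returning
 * quota < budget lets NAPI complete and re-enable interrupts.
 */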
4126 static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
4127 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
4128 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
4129 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
4130 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
4131 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
4134 static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
4135 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
4136 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
4137 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
4138 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
4139 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
4142 static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
4143 .htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
4146 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
4148 struct ath10k *ar = htt->ar;
4150 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
4151 htt->rx_ops = &htt_rx_ops_hl;
4152 else if (ar->hw_params.target_64bit)
4153 htt->rx_ops = &htt_rx_ops_64;
4155 htt->rx_ops = &htt_rx_ops_32;