atlantic: Fix driver resume flow.
[linux-2.6-microblaze.git] / drivers / net / wireless / ath / ath11k / dp_rx.c
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4  */
5
6 #include <linux/ieee80211.h>
7 #include <linux/kernel.h>
8 #include <linux/skbuff.h>
9 #include <crypto/hash.h>
10 #include "core.h"
11 #include "debug.h"
12 #include "debugfs_htt_stats.h"
13 #include "debugfs_sta.h"
14 #include "hal_desc.h"
15 #include "hw.h"
16 #include "dp_rx.h"
17 #include "hal_rx.h"
18 #include "dp_tx.h"
19 #include "peer.h"
20
21 #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
22
23 static u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
24 {
25         return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
26 }
27
28 static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
29                                                                struct hal_rx_desc *desc)
30 {
31         if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
32                 return HAL_ENCRYPT_TYPE_OPEN;
33
34         return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
35 }
36
37 static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
38                                                struct hal_rx_desc *desc)
39 {
40         return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
41 }
42
43 static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
44                                                      struct hal_rx_desc *desc)
45 {
46         return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
47 }
48
49 static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
50                                                      struct hal_rx_desc *desc)
51 {
52         return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
53 }
54
55 static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
56                                                struct hal_rx_desc *desc)
57 {
58         return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
59 }
60
61 static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
62                                                  struct sk_buff *skb)
63 {
64         struct ieee80211_hdr *hdr;
65
66         hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
67         return ieee80211_has_morefrags(hdr->frame_control);
68 }
69
70 static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
71                                              struct sk_buff *skb)
72 {
73         struct ieee80211_hdr *hdr;
74
75         hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
76         return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
77 }
78
79 static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
80                                             struct hal_rx_desc *desc)
81 {
82         return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
83 }
84
85 static void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
86                                         struct hal_rx_desc *desc)
87 {
88         return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
89 }
90
91 static bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
92 {
93         return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
94                            __le32_to_cpu(attn->info2));
95 }
96
97 static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
98 {
99         return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
100                            __le32_to_cpu(attn->info1));
101 }
102
103 static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
104 {
105         return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
106                            __le32_to_cpu(attn->info1));
107 }
108
109 static bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
110 {
111         return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
112                           __le32_to_cpu(attn->info2)) ==
113                 RX_DESC_DECRYPT_STATUS_CODE_OK);
114 }
115
116 static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
117 {
118         u32 info = __le32_to_cpu(attn->info1);
119         u32 errmap = 0;
120
121         if (info & RX_ATTENTION_INFO1_FCS_ERR)
122                 errmap |= DP_RX_MPDU_ERR_FCS;
123
124         if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
125                 errmap |= DP_RX_MPDU_ERR_DECRYPT;
126
127         if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
128                 errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
129
130         if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
131                 errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
132
133         if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
134                 errmap |= DP_RX_MPDU_ERR_OVERFLOW;
135
136         if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
137                 errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
138
139         if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
140                 errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
141
142         return errmap;
143 }
144
145 static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
146                                               struct hal_rx_desc *desc)
147 {
148         return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
149 }
150
151 static u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
152                                         struct hal_rx_desc *desc)
153 {
154         return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
155 }
156
157 static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
158                                              struct hal_rx_desc *desc)
159 {
160         return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
161 }
162
163 static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
164                                           struct hal_rx_desc *desc)
165 {
166         return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
167 }
168
169 static u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
170                                           struct hal_rx_desc *desc)
171 {
172         return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
173 }
174
175 static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
176                                              struct hal_rx_desc *desc)
177 {
178         return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
179 }
180
181 static u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
182                                         struct hal_rx_desc *desc)
183 {
184         return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
185 }
186
187 static u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
188                                         struct hal_rx_desc *desc)
189 {
190         return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
191 }
192
193 static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
194                                              struct hal_rx_desc *desc)
195 {
196         return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
197 }
198
199 static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
200                                         struct hal_rx_desc *desc)
201 {
202         return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
203 }
204
205 static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
206                                                struct hal_rx_desc *desc)
207 {
208         return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
209 }
210
211 static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
212                                               struct hal_rx_desc *desc)
213 {
214         return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
215 }
216
217 static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
218                                            struct hal_rx_desc *fdesc,
219                                            struct hal_rx_desc *ldesc)
220 {
221         ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
222 }
223
224 static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
225 {
226         return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
227                          __le32_to_cpu(attn->info1));
228 }
229
230 static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
231                                          struct hal_rx_desc *rx_desc)
232 {
233         u8 *rx_pkt_hdr;
234
235         rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);
236
237         return rx_pkt_hdr;
238 }
239
240 static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
241                                         struct hal_rx_desc *rx_desc)
242 {
243         u32 tlv_tag;
244
245         tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);
246
247         return tlv_tag == HAL_RX_MPDU_START;
248 }
249
250 static u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
251                                        struct hal_rx_desc *rx_desc)
252 {
253         return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
254 }
255
256 static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
257                                           struct hal_rx_desc *desc,
258                                           u16 len)
259 {
260         ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
261 }
262
263 static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
264                                         struct hal_rx_desc *desc)
265 {
266         struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
267
268         return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
269                 (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
270                  __le32_to_cpu(attn->info1)));
271 }
272
273 static void ath11k_dp_service_mon_ring(struct timer_list *t)
274 {
275         struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
276         int i;
277
278         for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
279                 ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
280
281         mod_timer(&ab->mon_reap_timer, jiffies +
282                   msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
283 }
284
285 static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
286 {
287         int i, reaped = 0;
288         unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
289
290         do {
291                 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
292                         reaped += ath11k_dp_rx_process_mon_rings(ab, i,
293                                                                  NULL,
294                                                                  DP_MON_SERVICE_BUDGET);
295
296                 /* nothing more to reap */
297                 if (reaped < DP_MON_SERVICE_BUDGET)
298                         return 0;
299
300         } while (time_before(jiffies, timeout));
301
302         ath11k_warn(ab, "dp mon ring purge timeout");
303
304         return -ETIMEDOUT;
305 }
306
/* Replenish the rxdma refill ring with freshly allocated, DMA-mapped skbs.
 * A req_entries of 0 means "top up opportunistically" (see below).
 * Returns the number of Rx buffers actually replenished.
 */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	/* Never queue more buffers than the ring can track */
	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	/* For an opportunistic top-up, only refill once more than three
	 * quarters of the ring slots are free.
	 */
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		/* Over-allocate so the payload start can be aligned below */
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		/* buf_id is embedded in the cookie handed to hw and mapped
		 * back to the skb via this idr on completion.
		 */
		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
				   rx_ring->bufs_max * 3, GFP_ATOMIC);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id < 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

	/* Error unwinding: undo only the steps completed for the current
	 * buffer, then finish the srng access as in the success path.
	 */
fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}
399
/* Unmap and free every skb still tracked by the given rxdma ring's idr,
 * plus (when rxdma1 is present) the first monitor status refill ring.
 */
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	/* if rxdma1_enable is false, mon_status_refill_ring
	 * isn't setup, so don't clean.
	 */
	if (!ar->ab->hw_params.rxdma1_enable)
		return 0;

	/* NOTE(review): only index 0 of the per-pdev status rings is cleaned
	 * here, and with DMA_BIDIRECTIONAL while the loop above uses
	 * DMA_FROM_DEVICE — confirm both match how each ring's buffers were
	 * originally mapped.
	 */
	rx_ring = &dp->rx_mon_status_refill_ring[0];

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* XXX: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}
445
446 static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
447 {
448         struct ath11k_pdev_dp *dp = &ar->dp;
449         struct ath11k_base *ab = ar->ab;
450         struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
451         int i;
452
453         ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
454
455         rx_ring = &dp->rxdma_mon_buf_ring;
456         ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
457
458         for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
459                 rx_ring = &dp->rx_mon_status_refill_ring[i];
460                 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
461         }
462
463         return 0;
464 }
465
466 static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
467                                           struct dp_rxdma_ring *rx_ring,
468                                           u32 ringtype)
469 {
470         struct ath11k_pdev_dp *dp = &ar->dp;
471         int num_entries;
472
473         num_entries = rx_ring->refill_buf_ring.size /
474                 ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
475
476         rx_ring->bufs_max = num_entries;
477         ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
478                                    HAL_RX_BUF_RBM_SW3_BM);
479         return 0;
480 }
481
482 static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
483 {
484         struct ath11k_pdev_dp *dp = &ar->dp;
485         struct ath11k_base *ab = ar->ab;
486         struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
487         int i;
488
489         ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
490
491         if (ar->ab->hw_params.rxdma1_enable) {
492                 rx_ring = &dp->rxdma_mon_buf_ring;
493                 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
494         }
495
496         for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
497                 rx_ring = &dp->rx_mon_status_refill_ring[i];
498                 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
499         }
500
501         return 0;
502 }
503
/* Free all per-pdev rx srngs allocated by ath11k_dp_rx_pdev_srng_alloc() */
static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	int i;

	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		/* rx_mac_buf_ring only exists on hw that uses per-mac rings */
		if (ab->hw_params.rx_mac_buf_ring)
			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);

		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
		ath11k_dp_srng_cleanup(ab,
				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
	}

	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}
523
524 void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
525 {
526         struct ath11k_dp *dp = &ab->dp;
527         int i;
528
529         for (i = 0; i < DP_REO_DST_RING_MAX; i++)
530                 ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
531 }
532
533 int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
534 {
535         struct ath11k_dp *dp = &ab->dp;
536         int ret;
537         int i;
538
539         for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
540                 ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
541                                            HAL_REO_DST, i, 0,
542                                            DP_REO_DST_RING_SIZE);
543                 if (ret) {
544                         ath11k_warn(ab, "failed to setup reo_dst_ring\n");
545                         goto err_reo_cleanup;
546                 }
547         }
548
549         return 0;
550
551 err_reo_cleanup:
552         ath11k_dp_pdev_reo_cleanup(ab);
553
554         return ret;
555 }
556
/* Allocate the per-pdev rx srngs: refill ring, optional per-mac buffer
 * rings, rxdma error destination rings, monitor status refill rings and,
 * when rxdma1 is present, the monitor buf/dst/desc rings.
 * Returns 0 on success or a negative error code; partially created rings
 * are left for the caller's cleanup path.
 */
static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_srng *srng = NULL;
	int i;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	/* Per-mac buffer rings, one per rxdma, only on hw that uses them */
	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		/* init mon status buffer reap timer */
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}
653
654 void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
655 {
656         struct ath11k_dp *dp = &ab->dp;
657         struct dp_reo_cmd *cmd, *tmp;
658         struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
659
660         spin_lock_bh(&dp->reo_cmd_lock);
661         list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
662                 list_del(&cmd->list);
663                 dma_unmap_single(ab->dev, cmd->data.paddr,
664                                  cmd->data.size, DMA_BIDIRECTIONAL);
665                 kfree(cmd->data.vaddr);
666                 kfree(cmd);
667         }
668
669         list_for_each_entry_safe(cmd_cache, tmp_cache,
670                                  &dp->reo_cmd_cache_flush_list, list) {
671                 list_del(&cmd_cache->list);
672                 dp->reo_cmd_cache_flush_count--;
673                 dma_unmap_single(ab->dev, cmd_cache->data.paddr,
674                                  cmd_cache->data.size, DMA_BIDIRECTIONAL);
675                 kfree(cmd_cache->data.vaddr);
676                 kfree(cmd_cache);
677         }
678         spin_unlock_bh(&dp->reo_cmd_lock);
679 }
680
/* Status callback for the final REO cache flush: warn on failure, but
 * release the rx tid's queue descriptor mapping and memory either way.
 */
static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}
694
/* Flush a deleted rx tid's hw queue descriptor out of the REO cache.
 * The backing host memory is freed by ath11k_dp_reo_cmd_free() once the
 * final, status-tracked flush command completes.
 */
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	/* Flush one desc_sz chunk at a time, walking backwards from the end
	 * of the queue descriptor; these flushes are fire-and-forget.
	 */
	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	/* Final flush of the base address requests a status callback, which
	 * is what frees the descriptor memory.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		/* The callback will never run; release the mapping here */
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}
}
733
/* Status callback for the UPDATE_RX_QUEUE (invalidate) command sent from
 * ath11k_peer_rx_tid_delete(). On success the descriptor is queued for a
 * deferred REO cache flush; on drain it is freed immediately.
 */
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		/* Command was drained (hw no longer references the
		 * descriptor) so it can be released right away.
		 */
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	/* The element takes a copy of the tid state, including ownership of
	 * paddr/vaddr, to be released after the cache flush.
	 */
	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;
			/* NOTE(review): reo_cmd_lock is dropped while sending
			 * the flush; the saved next pointer (tmp) could go
			 * stale if the list is mutated concurrently — confirm
			 * all mutators serialize on this lock.
			 */
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}
784
/* Invalidate the hw rx queue for the given peer/tid and hand the backing
 * queue descriptor to ath11k_dp_rx_tid_del_func() for deferred release.
 */
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
			       struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	/* Update the VLD bit so hw stops using this rx queue */
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		/* The callback will never run; release the descriptor here */
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}

	rx_tid->active = false;
}
812
/* Return a used msdu link descriptor to hw through the WBM release ring.
 * Returns 0 on success, -ENOBUFS if the release ring has no free entry.
 */
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}
844
845 static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
846 {
847         struct ath11k_base *ab = rx_tid->ab;
848
849         lockdep_assert_held(&ab->base_lock);
850
851         if (rx_tid->dst_ring_desc) {
852                 if (rel_link_desc)
853                         ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
854                                                       HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
855                 kfree(rx_tid->dst_ring_desc);
856                 rx_tid->dst_ring_desc = NULL;
857         }
858
859         rx_tid->cur_sn = 0;
860         rx_tid->last_frag_no = 0;
861         rx_tid->rx_frag_bitmap = 0;
862         __skb_queue_purge(&rx_tid->rx_frags);
863 }
864
/* Flush all pending rx fragments for every tid of @peer.
 *
 * Caller must hold ar->ab->base_lock. The lock is dropped around
 * del_timer_sync() because synchronously waiting for the fragment timer
 * handler while holding base_lock could deadlock (the handler presumably
 * takes base_lock itself -- NOTE(review): handler not visible here,
 * confirm against ath11k_dp_rx_frag_timer).
 */
void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	/* "<=" includes the extra slot past the QoS tids (non-QoS tid). */
	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);

		ath11k_dp_rx_frags_cleanup(rx_tid, true);
	}
}
882
/* Full rx teardown for @peer: for every tid, invalidate and release the
 * REO reorder queue, discard pending fragments, and stop the fragment
 * timer.
 *
 * Caller must hold ar->ab->base_lock. As in ath11k_peer_frags_flush(),
 * the lock is dropped around del_timer_sync() to avoid deadlocking
 * against the timer handler (which is expected to take base_lock --
 * NOTE(review): handler not visible here, confirm).
 */
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	/* "<=" includes the extra slot past the QoS tids (non-QoS tid). */
	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}
901
902 static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
903                                          struct ath11k_peer *peer,
904                                          struct dp_rx_tid *rx_tid,
905                                          u32 ba_win_sz, u16 ssn,
906                                          bool update_ssn)
907 {
908         struct ath11k_hal_reo_cmd cmd = {0};
909         int ret;
910
911         cmd.addr_lo = lower_32_bits(rx_tid->paddr);
912         cmd.addr_hi = upper_32_bits(rx_tid->paddr);
913         cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
914         cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
915         cmd.ba_window_size = ba_win_sz;
916
917         if (update_ssn) {
918                 cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
919                 cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
920         }
921
922         ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
923                                         HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
924                                         NULL);
925         if (ret) {
926                 ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
927                             rx_tid->tid, ret);
928                 return ret;
929         }
930
931         rx_tid->ba_win_sz = ba_win_sz;
932
933         return 0;
934 }
935
936 static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
937                                       const u8 *peer_mac, int vdev_id, u8 tid)
938 {
939         struct ath11k_peer *peer;
940         struct dp_rx_tid *rx_tid;
941
942         spin_lock_bh(&ab->base_lock);
943
944         peer = ath11k_peer_find(ab, vdev_id, peer_mac);
945         if (!peer) {
946                 ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
947                 goto unlock_exit;
948         }
949
950         rx_tid = &peer->rx_tid[tid];
951         if (!rx_tid->active)
952                 goto unlock_exit;
953
954         dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
955                          DMA_BIDIRECTIONAL);
956         kfree(rx_tid->vaddr);
957
958         rx_tid->active = false;
959
960 unlock_exit:
961         spin_unlock_bh(&ab->base_lock);
962 }
963
/* Set up (or update) the hardware REO rx reorder queue for @tid of the
 * peer identified by @peer_mac/@vdev_id.
 *
 * If the tid queue already exists, only its BA window size and SSN are
 * updated (via REO command + WMI). Otherwise a new queue descriptor is
 * allocated, DMA-mapped, initialized, and registered with firmware via
 * WMI.
 *
 * Returns 0 on success or a negative error code. Takes and releases
 * ab->base_lock internally; must NOT be called with it held.
 */
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		/* Inform firmware of the (possibly relocated) queue. */
		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	/* GFP_ATOMIC: allocated under base_lock (spinlock, BH disabled).
	 * Over-allocate so the descriptor can be aligned below.
	 */
	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	/* Publish the queue only after the DMA mapping succeeded. */
	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
			    tid, ret);
		/* Roll back: unmap/free the qdesc and mark tid inactive. */
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}
1061
1062 int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
1063                              struct ieee80211_ampdu_params *params)
1064 {
1065         struct ath11k_base *ab = ar->ab;
1066         struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
1067         int vdev_id = arsta->arvif->vdev_id;
1068         int ret;
1069
1070         ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
1071                                        params->tid, params->buf_size,
1072                                        params->ssn, arsta->pn_type);
1073         if (ret)
1074                 ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
1075
1076         return ret;
1077 }
1078
1079 int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
1080                             struct ieee80211_ampdu_params *params)
1081 {
1082         struct ath11k_base *ab = ar->ab;
1083         struct ath11k_peer *peer;
1084         struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
1085         int vdev_id = arsta->arvif->vdev_id;
1086         dma_addr_t paddr;
1087         bool active;
1088         int ret;
1089
1090         spin_lock_bh(&ab->base_lock);
1091
1092         peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
1093         if (!peer) {
1094                 ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
1095                 spin_unlock_bh(&ab->base_lock);
1096                 return -ENOENT;
1097         }
1098
1099         paddr = peer->rx_tid[params->tid].paddr;
1100         active = peer->rx_tid[params->tid].active;
1101
1102         if (!active) {
1103                 spin_unlock_bh(&ab->base_lock);
1104                 return 0;
1105         }
1106
1107         ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
1108         spin_unlock_bh(&ab->base_lock);
1109         if (ret) {
1110                 ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
1111                             params->tid, ret);
1112                 return ret;
1113         }
1114
1115         ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1116                                                      params->sta->addr, paddr,
1117                                                      params->tid, 1, 1);
1118         if (ret)
1119                 ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
1120                             ret);
1121
1122         return ret;
1123 }
1124
1125 int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
1126                                        const u8 *peer_addr,
1127                                        enum set_key_cmd key_cmd,
1128                                        struct ieee80211_key_conf *key)
1129 {
1130         struct ath11k *ar = arvif->ar;
1131         struct ath11k_base *ab = ar->ab;
1132         struct ath11k_hal_reo_cmd cmd = {0};
1133         struct ath11k_peer *peer;
1134         struct dp_rx_tid *rx_tid;
1135         u8 tid;
1136         int ret = 0;
1137
1138         /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1139          * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1140          * for now.
1141          */
1142         if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1143                 return 0;
1144
1145         cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
1146         cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
1147                     HAL_REO_CMD_UPD0_PN_SIZE |
1148                     HAL_REO_CMD_UPD0_PN_VALID |
1149                     HAL_REO_CMD_UPD0_PN_CHECK |
1150                     HAL_REO_CMD_UPD0_SVLD;
1151
1152         switch (key->cipher) {
1153         case WLAN_CIPHER_SUITE_TKIP:
1154         case WLAN_CIPHER_SUITE_CCMP:
1155         case WLAN_CIPHER_SUITE_CCMP_256:
1156         case WLAN_CIPHER_SUITE_GCMP:
1157         case WLAN_CIPHER_SUITE_GCMP_256:
1158                 if (key_cmd == SET_KEY) {
1159                         cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
1160                         cmd.pn_size = 48;
1161                 }
1162                 break;
1163         default:
1164                 break;
1165         }
1166
1167         spin_lock_bh(&ab->base_lock);
1168
1169         peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
1170         if (!peer) {
1171                 ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
1172                 spin_unlock_bh(&ab->base_lock);
1173                 return -ENOENT;
1174         }
1175
1176         for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
1177                 rx_tid = &peer->rx_tid[tid];
1178                 if (!rx_tid->active)
1179                         continue;
1180                 cmd.addr_lo = lower_32_bits(rx_tid->paddr);
1181                 cmd.addr_hi = upper_32_bits(rx_tid->paddr);
1182                 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
1183                                                 HAL_REO_CMD_UPDATE_RX_QUEUE,
1184                                                 &cmd, NULL);
1185                 if (ret) {
1186                         ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
1187                                     tid, ret);
1188                         break;
1189                 }
1190         }
1191
1192         spin_unlock_bh(&ab->base_lock);
1193
1194         return ret;
1195 }
1196
1197 static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1198                                              u16 peer_id)
1199 {
1200         int i;
1201
1202         for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1203                 if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1204                         if (peer_id == ppdu_stats->user_stats[i].peer_id)
1205                                 return i;
1206                 } else {
1207                         return i;
1208                 }
1209         }
1210
1211         return -EINVAL;
1212 }
1213
/* TLV iterator callback for HTT PPDU stats messages.
 *
 * Copies the recognized TLVs (common, per-user rate, per-user completion,
 * per-user ack/BA status) into the htt_ppdu_stats_info passed via @data.
 * Per-user TLVs are routed to a user_stats slot keyed by sw_peer_id.
 * Unknown tags fall through and are silently ignored.
 *
 * Returns 0 on success or -EINVAL on a short TLV or when no user slot is
 * available.
 */
static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = (struct htt_ppdu_stats_info *)data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		/* Each length check guards the fixed-size memcpy below it. */
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		/* Record which TLVs arrived; consumers check these bits. */
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}
1297
1298 int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
1299                            int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
1300                                        const void *ptr, void *data),
1301                            void *data)
1302 {
1303         const struct htt_tlv *tlv;
1304         const void *begin = ptr;
1305         u16 tlv_tag, tlv_len;
1306         int ret = -EINVAL;
1307
1308         while (len > 0) {
1309                 if (len < sizeof(*tlv)) {
1310                         ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
1311                                    ptr - begin, len, sizeof(*tlv));
1312                         return -EINVAL;
1313                 }
1314                 tlv = (struct htt_tlv *)ptr;
1315                 tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
1316                 tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
1317                 ptr += sizeof(*tlv);
1318                 len -= sizeof(*tlv);
1319
1320                 if (tlv_len > len) {
1321                         ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
1322                                    tlv_tag, ptr - begin, len, tlv_len);
1323                         return -EINVAL;
1324                 }
1325                 ret = iter(ab, tlv_tag, tlv_len, ptr, data);
1326                 if (ret == -ENOMEM)
1327                         return ret;
1328
1329                 ptr += tlv_len;
1330                 len -= tlv_len;
1331         }
1332         return 0;
1333 }
1334
1335 static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
1336 {
1337         u32 ret = 0;
1338
1339         switch (sgi) {
1340         case RX_MSDU_START_SGI_0_8_US:
1341                 ret = NL80211_RATE_INFO_HE_GI_0_8;
1342                 break;
1343         case RX_MSDU_START_SGI_1_6_US:
1344                 ret = NL80211_RATE_INFO_HE_GI_1_6;
1345                 break;
1346         case RX_MSDU_START_SGI_3_2_US:
1347                 ret = NL80211_RATE_INFO_HE_GI_3_2;
1348                 break;
1349         }
1350
1351         return ret;
1352 }
1353
1354 static void
1355 ath11k_update_per_peer_tx_stats(struct ath11k *ar,
1356                                 struct htt_ppdu_stats *ppdu_stats, u8 user)
1357 {
1358         struct ath11k_base *ab = ar->ab;
1359         struct ath11k_peer *peer;
1360         struct ieee80211_sta *sta;
1361         struct ath11k_sta *arsta;
1362         struct htt_ppdu_stats_user_rate *user_rate;
1363         struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
1364         struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
1365         struct htt_ppdu_stats_common *common = &ppdu_stats->common;
1366         int ret;
1367         u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
1368         u32 succ_bytes = 0;
1369         u16 rate = 0, succ_pkts = 0;
1370         u32 tx_duration = 0;
1371         u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
1372         bool is_ampdu = false;
1373
1374         if (!usr_stats)
1375                 return;
1376
1377         if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
1378                 return;
1379
1380         if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
1381                 is_ampdu =
1382                         HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
1383
1384         if (usr_stats->tlv_flags &
1385             BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
1386                 succ_bytes = usr_stats->ack_ba.success_bytes;
1387                 succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
1388                                       usr_stats->ack_ba.info);
1389                 tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
1390                                 usr_stats->ack_ba.info);
1391         }
1392
1393         if (common->fes_duration_us)
1394                 tx_duration = common->fes_duration_us;
1395
1396         user_rate = &usr_stats->rate;
1397         flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
1398         bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
1399         nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
1400         mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
1401         sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
1402         dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
1403
1404         /* Note: If host configured fixed rates and in some other special
1405          * cases, the broadcast/management frames are sent in different rates.
1406          * Firmware rate's control to be skipped for this?
1407          */
1408
1409         if (flags == WMI_RATE_PREAMBLE_HE && mcs > 11) {
1410                 ath11k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
1411                 return;
1412         }
1413
1414         if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
1415                 ath11k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
1416                 return;
1417         }
1418
1419         if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
1420                 ath11k_warn(ab, "Invalid VHT mcs %d peer stats",  mcs);
1421                 return;
1422         }
1423
1424         if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
1425                 ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
1426                             mcs, nss);
1427                 return;
1428         }
1429
1430         if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1431                 ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
1432                                                             flags,
1433                                                             &rate_idx,
1434                                                             &rate);
1435                 if (ret < 0)
1436                         return;
1437         }
1438
1439         rcu_read_lock();
1440         spin_lock_bh(&ab->base_lock);
1441         peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
1442
1443         if (!peer || !peer->sta) {
1444                 spin_unlock_bh(&ab->base_lock);
1445                 rcu_read_unlock();
1446                 return;
1447         }
1448
1449         sta = peer->sta;
1450         arsta = (struct ath11k_sta *)sta->drv_priv;
1451
1452         memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1453
1454         switch (flags) {
1455         case WMI_RATE_PREAMBLE_OFDM:
1456                 arsta->txrate.legacy = rate;
1457                 break;
1458         case WMI_RATE_PREAMBLE_CCK:
1459                 arsta->txrate.legacy = rate;
1460                 break;
1461         case WMI_RATE_PREAMBLE_HT:
1462                 arsta->txrate.mcs = mcs + 8 * (nss - 1);
1463                 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1464                 if (sgi)
1465                         arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1466                 break;
1467         case WMI_RATE_PREAMBLE_VHT:
1468                 arsta->txrate.mcs = mcs;
1469                 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1470                 if (sgi)
1471                         arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1472                 break;
1473         case WMI_RATE_PREAMBLE_HE:
1474                 arsta->txrate.mcs = mcs;
1475                 arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1476                 arsta->txrate.he_dcm = dcm;
1477                 arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
1478                 arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
1479                                                 (user_rate->ru_end -
1480                                                  user_rate->ru_start) + 1);
1481                 break;
1482         }
1483
1484         arsta->txrate.nss = nss;
1485         arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
1486         arsta->tx_duration += tx_duration;
1487         memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1488
1489         /* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
1490          * So skip peer stats update for mgmt packets.
1491          */
1492         if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1493                 memset(peer_stats, 0, sizeof(*peer_stats));
1494                 peer_stats->succ_pkts = succ_pkts;
1495                 peer_stats->succ_bytes = succ_bytes;
1496                 peer_stats->is_ampdu = is_ampdu;
1497                 peer_stats->duration = tx_duration;
1498                 peer_stats->ba_fails =
1499                         HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1500                         HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1501
1502                 if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
1503                         ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
1504         }
1505
1506         spin_unlock_bh(&ab->base_lock);
1507         rcu_read_unlock();
1508 }
1509
1510 static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
1511                                          struct htt_ppdu_stats *ppdu_stats)
1512 {
1513         u8 user;
1514
1515         for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1516                 ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1517 }
1518
1519 static
1520 struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
1521                                                         u32 ppdu_id)
1522 {
1523         struct htt_ppdu_stats_info *ppdu_info;
1524
1525         spin_lock_bh(&ar->data_lock);
1526         if (!list_empty(&ar->ppdu_stats_info)) {
1527                 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1528                         if (ppdu_info->ppdu_id == ppdu_id) {
1529                                 spin_unlock_bh(&ar->data_lock);
1530                                 return ppdu_info;
1531                         }
1532                 }
1533
1534                 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1535                         ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1536                                                      typeof(*ppdu_info), list);
1537                         list_del(&ppdu_info->list);
1538                         ar->ppdu_stat_list_depth--;
1539                         ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1540                         kfree(ppdu_info);
1541                 }
1542         }
1543         spin_unlock_bh(&ar->data_lock);
1544
1545         ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
1546         if (!ppdu_info)
1547                 return NULL;
1548
1549         spin_lock_bh(&ar->data_lock);
1550         list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1551         ar->ppdu_stat_list_depth++;
1552         spin_unlock_bh(&ar->data_lock);
1553
1554         return ppdu_info;
1555 }
1556
1557 static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
1558                                       struct sk_buff *skb)
1559 {
1560         struct ath11k_htt_ppdu_stats_msg *msg;
1561         struct htt_ppdu_stats_info *ppdu_info;
1562         struct ath11k *ar;
1563         int ret;
1564         u8 pdev_id;
1565         u32 ppdu_id, len;
1566
1567         msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
1568         len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
1569         pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
1570         ppdu_id = msg->ppdu_id;
1571
1572         rcu_read_lock();
1573         ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1574         if (!ar) {
1575                 ret = -EINVAL;
1576                 goto exit;
1577         }
1578
1579         if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
1580                 trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
1581
1582         ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1583         if (!ppdu_info) {
1584                 ret = -EINVAL;
1585                 goto exit;
1586         }
1587
1588         ppdu_info->ppdu_id = ppdu_id;
1589         ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
1590                                      ath11k_htt_tlv_ppdu_stats_parse,
1591                                      (void *)ppdu_info);
1592         if (ret) {
1593                 ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
1594                 goto exit;
1595         }
1596
1597 exit:
1598         rcu_read_unlock();
1599
1600         return ret;
1601 }
1602
1603 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1604 {
1605         struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1606         struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
1607         struct ath11k *ar;
1608         u8 pdev_id;
1609
1610         pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1611         ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1612         if (!ar) {
1613                 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
1614                 return;
1615         }
1616
1617         trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
1618                                 ar->ab->pktlog_defs_checksum);
1619 }
1620
1621 static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
1622                                                   struct sk_buff *skb)
1623 {
1624         u32 *data = (u32 *)skb->data;
1625         u8 pdev_id, ring_type, ring_id, pdev_idx;
1626         u16 hp, tp;
1627         u32 backpressure_time;
1628         struct ath11k_bp_stats *bp_stats;
1629
1630         pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
1631         ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
1632         ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
1633         ++data;
1634
1635         hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
1636         tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
1637         ++data;
1638
1639         backpressure_time = *data;
1640
1641         ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
1642                    pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
1643
1644         if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
1645                 if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
1646                         return;
1647
1648                 bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
1649         } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
1650                 pdev_idx = DP_HW2SW_MACID(pdev_id);
1651
1652                 if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
1653                         return;
1654
1655                 bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
1656         } else {
1657                 ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
1658                             ring_type);
1659                 return;
1660         }
1661
1662         spin_lock_bh(&ab->base_lock);
1663         bp_stats->hp = hp;
1664         bp_stats->tp = tp;
1665         bp_stats->count++;
1666         bp_stats->jiffies = jiffies;
1667         spin_unlock_bh(&ab->base_lock);
1668 }
1669
/* Central dispatcher for target-to-host (T2H) HTT messages arriving on
 * the HTC endpoint.  The message type is decoded from the first 32-bit
 * word and the skb is routed to the matching handler.  The skb is
 * consumed (freed) here; the per-type handlers only borrow it.
 */
void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	struct ath11k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash;
	u16 hw_peer_id;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		/* Target reports its HTT version; wake up the waiter in
		 * the HTT setup path.
		 */
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
						  resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
						  resp->version_msg.version);
		complete(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		/* v1 peer map carries no AST hash / hw peer id: pass zeros. */
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		/* MAC address is split over two words; reassemble it. */
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		/* v2 peer map additionally carries AST hash and hw peer id. */
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
				     resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		/* Both unmap variants only need the peer id here. */
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
				    resp->peer_unmap_ev.info);
		ath11k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
	default:
		ath11k_warn(ab, "htt event %d not handled\n", type);
		break;
	}

	/* All message types are fully handled above; release the skb. */
	dev_kfree_skb_any(skb);
}
1745
/* Coalesce an MSDU that the hardware scattered over several rx buffers
 * into the single skb @first.
 *
 * @msdu_list: remaining buffers of this MSDU (consumed and freed here)
 * @first: first buffer; on success holds the whole linear MSDU
 * @last: buffer holding the final fragment (its descriptor carries the
 *        only valid MSDU_END/MPDU_END TLVs)
 * @l3pad_bytes: L3 header padding inserted by hardware after the
 *        rx descriptor in the first buffer
 * @msdu_len: total MSDU length across all buffers
 *
 * Returns 0 on success, -ENOMEM if the head could not be expanded,
 * -EINVAL on a malformed continuation chain.
 */
static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	/* Entire MSDU fits in the first buffer: nothing to coalesce.
	 * This is unexpected for a caller that saw a continuation, hence
	 * the WARN.
	 */
	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spread over multiple buffers attention, MSDU_END and
	 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

	/* Make sure the head has room for the remaining fragments. */
	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH11K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	/* Append each continuation buffer's payload (after its rx
	 * descriptor) to @first, freeing the buffer afterwards.  A buffer
	 * without the continuation flag terminates the chain.
	 */
	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + hal_rx_desc_sz);
		skb_pull(skb, hal_rx_desc_sz);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}
1830
1831 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1832                                                       struct sk_buff *first)
1833 {
1834         struct sk_buff *skb;
1835         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1836
1837         if (!rxcb->is_continuation)
1838                 return first;
1839
1840         skb_queue_walk(msdu_list, skb) {
1841                 rxcb = ATH11K_SKB_RXCB(skb);
1842                 if (!rxcb->is_continuation)
1843                         return skb;
1844         }
1845
1846         return NULL;
1847 }
1848
1849 static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
1850 {
1851         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1852         struct rx_attention *rx_attention;
1853         bool ip_csum_fail, l4_csum_fail;
1854
1855         rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
1856         ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
1857         l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
1858
1859         msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1860                           CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1861 }
1862
1863 static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1864                                        enum hal_encrypt_type enctype)
1865 {
1866         switch (enctype) {
1867         case HAL_ENCRYPT_TYPE_OPEN:
1868         case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1869         case HAL_ENCRYPT_TYPE_TKIP_MIC:
1870                 return 0;
1871         case HAL_ENCRYPT_TYPE_CCMP_128:
1872                 return IEEE80211_CCMP_MIC_LEN;
1873         case HAL_ENCRYPT_TYPE_CCMP_256:
1874                 return IEEE80211_CCMP_256_MIC_LEN;
1875         case HAL_ENCRYPT_TYPE_GCMP_128:
1876         case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1877                 return IEEE80211_GCMP_MIC_LEN;
1878         case HAL_ENCRYPT_TYPE_WEP_40:
1879         case HAL_ENCRYPT_TYPE_WEP_104:
1880         case HAL_ENCRYPT_TYPE_WEP_128:
1881         case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1882         case HAL_ENCRYPT_TYPE_WAPI:
1883                 break;
1884         }
1885
1886         ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1887         return 0;
1888 }
1889
1890 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1891                                          enum hal_encrypt_type enctype)
1892 {
1893         switch (enctype) {
1894         case HAL_ENCRYPT_TYPE_OPEN:
1895                 return 0;
1896         case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1897         case HAL_ENCRYPT_TYPE_TKIP_MIC:
1898                 return IEEE80211_TKIP_IV_LEN;
1899         case HAL_ENCRYPT_TYPE_CCMP_128:
1900                 return IEEE80211_CCMP_HDR_LEN;
1901         case HAL_ENCRYPT_TYPE_CCMP_256:
1902                 return IEEE80211_CCMP_256_HDR_LEN;
1903         case HAL_ENCRYPT_TYPE_GCMP_128:
1904         case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1905                 return IEEE80211_GCMP_HDR_LEN;
1906         case HAL_ENCRYPT_TYPE_WEP_40:
1907         case HAL_ENCRYPT_TYPE_WEP_104:
1908         case HAL_ENCRYPT_TYPE_WEP_128:
1909         case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1910         case HAL_ENCRYPT_TYPE_WAPI:
1911                 break;
1912         }
1913
1914         ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1915         return 0;
1916 }
1917
1918 static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1919                                        enum hal_encrypt_type enctype)
1920 {
1921         switch (enctype) {
1922         case HAL_ENCRYPT_TYPE_OPEN:
1923         case HAL_ENCRYPT_TYPE_CCMP_128:
1924         case HAL_ENCRYPT_TYPE_CCMP_256:
1925         case HAL_ENCRYPT_TYPE_GCMP_128:
1926         case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1927                 return 0;
1928         case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1929         case HAL_ENCRYPT_TYPE_TKIP_MIC:
1930                 return IEEE80211_TKIP_ICV_LEN;
1931         case HAL_ENCRYPT_TYPE_WEP_40:
1932         case HAL_ENCRYPT_TYPE_WEP_104:
1933         case HAL_ENCRYPT_TYPE_WEP_128:
1934         case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1935         case HAL_ENCRYPT_TYPE_WAPI:
1936                 break;
1937         }
1938
1939         ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1940         return 0;
1941 }
1942
/* Undecap a "native wifi" frame back into a full 802.11 frame.
 *
 * @first_hdr: the original 802.11 header taken from the rx descriptor
 * @enctype: cipher in use; determines how many crypto-param bytes to
 *           re-insert when the IV was not stripped
 * @status: rx status whose flags say which crypto parts were stripped
 *
 * For the first MSDU of an (A-)MPDU the original header is reused; for
 * middle/last A-MSDU subframes a QoS header is rebuilt from the cached
 * decap header plus the TID from the rxcb.
 */
static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
					 struct sk_buff *msdu,
					 u8 *first_hdr,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u16 qos_ctl = 0;
	u8 *qos;

	/* copy SA & DA and pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));

	if (rxcb->is_first_msdu) {
		/* original 802.11 header is valid for the first msdu
		 * hence we can reuse the same header
		 */
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);

		/* Each A-MSDU subframe will be reported as a separate MSDU,
		 * so strip the A-MSDU bit from QoS Ctl.
		 */
		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		/*  Rebuild qos header if this is a middle/last msdu */
		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

		/* Reset the order bit as the HT_Control header is stripped */
		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));

		qos_ctl = rxcb->tid;

		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

		/* TODO Add other QoS ctl fields when required */

		/* copy decap header before overwriting for reuse below */
		memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
	}

	/* Re-insert the crypto params (IV/PN) that follow the header when
	 * they were not stripped by hardware.
	 */
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	/* Middle/last subframe: push the rebuilt QoS control and the
	 * cached header, then we are done.
	 */
	if (!rxcb->is_first_msdu) {
		memcpy(skb_push(msdu,
				IEEE80211_QOS_CTL_LEN), &qos_ctl,
				IEEE80211_QOS_CTL_LEN);
		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
		return;
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
2021
/* Undecap a raw-mode frame: trim FCS and whichever crypto trailers
 * (MIC/ICV/MMIC) hardware left in place, and drop the IV from the head
 * when it was stripped logically but not physically.
 *
 * Only single-MSDU frames are supported here; A-MSDUs trigger the WARN
 * below and are left untouched.
 */
static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status,
				       bool decrypted)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	/* Condition reduces to !(is_first_msdu && is_last_msdu), i.e.
	 * bail out on anything that is not a complete single MSDU.
	 */
	if (!rxcb->is_first_msdu ||
	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
		WARN_ON_ONCE(1);
		return;
	}

	skb_trim(msdu, msdu->len - FCS_LEN);

	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	/* NOTE(review): this trims the 8-byte Michael MIC using
	 * IEEE80211_CCMP_MIC_LEN, which happens to also be 8; a
	 * Michael-specific length constant would be clearer — confirm
	 * against upstream before changing.
	 */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		/* Slide the 802.11 header up over the IV, then drop the
		 * now-duplicated bytes from the front.
		 */
		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
2080
/* Locate the RFC 1042 (LLC/SNAP) header inside the original 802.11
 * frame preserved in the rx descriptor's hdr_status area.
 *
 * For the first MSDU the LLC header follows the 802.11 header and
 * crypto params; for A-MSDU subframes it additionally follows the
 * A-MSDU subframe header.  Note the arithmetic is on a void pointer,
 * which the kernel's GNU C dialect treats as byte-sized.
 */
static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
					 struct sk_buff *msdu,
					 enum hal_encrypt_type enctype)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_amsdu;

	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
	rfc1042 = hdr;

	if (rxcb->is_first_msdu) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		rfc1042 += hdr_len + crypto_len;
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);

	return rfc1042;
}
2107
/* Undecap an Ethernet-II (DIX) decapped frame back into an 802.11
 * frame: replace the Ethernet header with the preserved LLC/SNAP
 * header, crypto params (when not stripped) and the original 802.11
 * header, then restore the frame's real DA/SA.
 */
static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
				       struct sk_buff *msdu,
				       u8 *first_hdr,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	void *rfc1042;

	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
	       sizeof(struct ath11k_dp_rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/* Re-insert the crypto params (IV/PN) when hardware left them in
	 * the preserved header rather than stripping them.
	 */
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
2155
2156 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
2157                                    struct hal_rx_desc *rx_desc,
2158                                    enum hal_encrypt_type enctype,
2159                                    struct ieee80211_rx_status *status,
2160                                    bool decrypted)
2161 {
2162         u8 *first_hdr;
2163         u8 decap;
2164
2165         first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
2166         decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
2167
2168         switch (decap) {
2169         case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2170                 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
2171                                              enctype, status);
2172                 break;
2173         case DP_RX_DECAP_TYPE_RAW:
2174                 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2175                                            decrypted);
2176                 break;
2177         case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2178                 /* TODO undecap support for middle/last msdu's of amsdu */
2179                 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2180                                            enctype, status);
2181                 break;
2182         case DP_RX_DECAP_TYPE_8023:
2183                 /* TODO: Handle undecap for these formats */
2184                 break;
2185         }
2186 }
2187
/* Per-MPDU rx processing: resolve the transmitter's cipher via the
 * peer table, translate hardware error/decryption status into mac80211
 * RX flags, apply the checksum-offload verdict and undecap the frame.
 */
static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
				struct sk_buff *msdu,
				struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	bool  fill_crypto_hdr, mcast;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	struct ieee80211_hdr *hdr;
	struct ath11k_peer *peer;
	struct rx_attention *rx_attention;
	u32 err_bitmap;

	hdr = (struct ieee80211_hdr *)msdu->data;

	/* PN for multicast packets will be checked in mac80211 */

	/* For multicast frames the crypto header is kept so mac80211 can
	 * do the PN check itself.
	 */
	mcast = is_multicast_ether_addr(hdr->addr1);
	fill_crypto_hdr = mcast;

	/* Peer lookup by TA under base_lock; group vs pairwise cipher
	 * depends on whether the frame is multicast.  Unknown peers are
	 * treated as unencrypted.
	 */
	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
	if (peer) {
		if (mcast)
			enctype = peer->sec_type_grp;
		else
			enctype = peer->sec_type;
	} else {
		enctype = HAL_ENCRYPT_TYPE_OPEN;
	}
	spin_unlock_bh(&ar->ab->base_lock);

	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
		is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted) {
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;

		if (fill_crypto_hdr)
			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			rx_status->flag |= RX_FLAG_IV_STRIPPED |
					   RX_FLAG_PN_VALIDATED;
	}

	ath11k_dp_rx_h_csum_offload(ar, msdu);
	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       enctype, rx_status, is_decrypted);

	if (!is_decrypted || fill_crypto_hdr)
		return;

	/* Frame is fully decrypted and its crypto header removed; clear
	 * the protected bit so mac80211 does not try to decrypt again.
	 */
	hdr = (void *)msdu->data;
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
2258
2259 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2260                                 struct ieee80211_rx_status *rx_status)
2261 {
2262         struct ieee80211_supported_band *sband;
2263         enum rx_msdu_start_pkt_type pkt_type;
2264         u8 bw;
2265         u8 rate_mcs, nss;
2266         u8 sgi;
2267         bool is_cck;
2268
2269         pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
2270         bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
2271         rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
2272         nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
2273         sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
2274
2275         switch (pkt_type) {
2276         case RX_MSDU_START_PKT_TYPE_11A:
2277         case RX_MSDU_START_PKT_TYPE_11B:
2278                 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2279                 sband = &ar->mac.sbands[rx_status->band];
2280                 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2281                                                                 is_cck);
2282                 break;
2283         case RX_MSDU_START_PKT_TYPE_11N:
2284                 rx_status->encoding = RX_ENC_HT;
2285                 if (rate_mcs > ATH11K_HT_MCS_MAX) {
2286                         ath11k_warn(ar->ab,
2287                                     "Received with invalid mcs in HT mode %d\n",
2288                                      rate_mcs);
2289                         break;
2290                 }
2291                 rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2292                 if (sgi)
2293                         rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2294                 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2295                 break;
2296         case RX_MSDU_START_PKT_TYPE_11AC:
2297                 rx_status->encoding = RX_ENC_VHT;
2298                 rx_status->rate_idx = rate_mcs;
2299                 if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2300                         ath11k_warn(ar->ab,
2301                                     "Received with invalid mcs in VHT mode %d\n",
2302                                      rate_mcs);
2303                         break;
2304                 }
2305                 rx_status->nss = nss;
2306                 if (sgi)
2307                         rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2308                 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2309                 break;
2310         case RX_MSDU_START_PKT_TYPE_11AX:
2311                 rx_status->rate_idx = rate_mcs;
2312                 if (rate_mcs > ATH11K_HE_MCS_MAX) {
2313                         ath11k_warn(ar->ab,
2314                                     "Received with invalid mcs in HE mode %d\n",
2315                                     rate_mcs);
2316                         break;
2317                 }
2318                 rx_status->encoding = RX_ENC_HE;
2319                 rx_status->nss = nss;
2320                 rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
2321                 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2322                 break;
2323         }
2324 }
2325
/* Fill the per-PPDU fields of @rx_status (band, frequency, rate info)
 * from the rx descriptor, falling back to the radio's current rx
 * channel when the reported channel is outside the known 2/5/6 GHz
 * ranges.
 */
static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	u8 channel_num;
	u32 center_freq, meta_data;
	struct ieee80211_channel *channel;

	/* Reset to sane defaults before decoding this PPDU. */
	rx_status->freq = 0;
	rx_status->rate_idx = 0;
	rx_status->nss = 0;
	rx_status->encoding = RX_ENC_LEGACY;
	rx_status->bw = RATE_INFO_BW_20;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	/* meta_data packs the channel number in the low bits and the
	 * center frequency (MHz) in the upper 16 bits.
	 */
	meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
	channel_num = meta_data;
	center_freq = meta_data >> 16;

	/* 5935-7105 MHz is the 6 GHz band; channel numbers there overlap
	 * with 2.4/5 GHz, so the center frequency decides first.
	 */
	if (center_freq >= 5935 && center_freq <= 7105) {
		rx_status->band = NL80211_BAND_6GHZ;
	} else if (channel_num >= 1 && channel_num <= 14) {
		rx_status->band = NL80211_BAND_2GHZ;
	} else if (channel_num >= 36 && channel_num <= 173) {
		rx_status->band = NL80211_BAND_5GHZ;
	} else {
		/* Unrecognized channel: trust the currently configured rx
		 * channel and dump the descriptor for debugging.
		 */
		spin_lock_bh(&ar->data_lock);
		channel = ar->rx_channel;
		if (channel) {
			rx_status->band = channel->band;
			channel_num =
				ieee80211_frequency_to_channel(channel->center_freq);
		}
		spin_unlock_bh(&ar->data_lock);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
				rx_desc, sizeof(struct hal_rx_desc));
	}

	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
							 rx_status->band);

	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
2369
2370 static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
2371                                   size_t size)
2372 {
2373         u8 *qc;
2374         int tid;
2375
2376         if (!ieee80211_is_data_qos(hdr->frame_control))
2377                 return "";
2378
2379         qc = ieee80211_get_qos_ctl(hdr);
2380         tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
2381         snprintf(out, size, "tid %d", tid);
2382
2383         return out;
2384 }
2385
/* Hand a fully processed msdu to mac80211 in NAPI context.
 *
 * For HE PPDUs a minimal radiotap HE struct (MCS/BW-RU-alloc/GI "known"
 * fields set) is pushed in front of the frame and RX_FLAG_RADIOTAP_HE is
 * set so mac80211 consumes it as rate info for radiotap.
 */
static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
				      struct sk_buff *msdu)
{
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
	};
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_radiotap_he *he = NULL;
	char tid[32];

	status = IEEE80211_SKB_RXCB(msdu);
	if (status->encoding == RX_ENC_HE) {
		/* skb_push() only moves msdu->data backwards to make room;
		 * hdr keeps pointing at the (unmoved) 802.11 header.
		 */
		he = skb_push(msdu, sizeof(known));
		memcpy(he, &known, sizeof(known));
		status->flag |= RX_FLAG_RADIOTAP_HE;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   msdu,
		   msdu->len,
		   ieee80211_get_SA(hdr),
		   ath11k_print_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->encoding == RX_ENC_HE) ? "he" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));

	ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
			msdu->data, msdu->len);

	/* TODO: trace rx packet */

	ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
}
2438
/* Turn one reaped rx buffer into an 802.11 frame ready for delivery.
 *
 * Validates the MSDU_DONE attention bit (taken from the last buffer of
 * the msdu, where the END tlvs live), strips the hal rx descriptor and
 * l3 padding, coalesces multi-buffer msdus, and fills in the mac80211
 * rx status stored in the skb cb.
 *
 * Returns 0 on success or a negative errno when the msdu must be
 * dropped; the caller owns and frees the skb on error.
 */
static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
				     struct sk_buff *msdu,
				     struct sk_buff_head *msdu_list)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc, *lrx_desc;
	struct rx_attention *rx_attention;
	struct ieee80211_rx_status rx_status = {0};
	struct ieee80211_rx_status *status;
	struct ath11k_skb_rxcb *rxcb;
	struct ieee80211_hdr *hdr;
	struct sk_buff *last_buf;
	u8 l3_pad_bytes;
	u8 *hdr_status;
	u16 msdu_len;
	int ret;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* Attention/MSDU_END/MPDU_END tlvs are valid only in the last
	 * buffer of a (possibly multi-buffer) msdu.
	 */
	last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
	if (!last_buf) {
		ath11k_warn(ab,
			    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
		ret = -EIO;
		goto free_out;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	lrx_desc = (struct hal_rx_desc *)last_buf->data;
	rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
		ath11k_warn(ab, "msdu_done bit in attention is not set\n");
		ret = -EIO;
		goto free_out;
	}

	rxcb = ATH11K_SKB_RXCB(msdu);
	rxcb->rx_desc = rx_desc;
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
	l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);

	if (rxcb->is_frag) {
		/* Fragment: only the hal descriptor precedes the frame */
		skb_pull(msdu, hal_rx_desc_sz);
	} else if (!rxcb->is_continuation) {
		/* Single-buffer msdu: sanity-check the reported length
		 * against the rx buffer size before trimming.
		 */
		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
			hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
			ret = -EINVAL;
			ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
					sizeof(struct ieee80211_hdr));
			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
					sizeof(struct hal_rx_desc));
			goto free_out;
		}
		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
	} else {
		/* Msdu spans multiple buffers: merge the remaining buffers
		 * from msdu_list into this skb.
		 */
		ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
						 msdu, last_buf,
						 l3_pad_bytes, msdu_len);
		if (ret) {
			ath11k_warn(ab,
				    "failed to coalesce msdu rx buffer%d\n", ret);
			goto free_out;
		}
	}

	hdr = (struct ieee80211_hdr *)msdu->data;

	/* Process only data frames */
	if (!ieee80211_is_data(hdr->frame_control))
		return -EINVAL;

	ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status);
	ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status);

	rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;

	status = IEEE80211_SKB_RXCB(msdu);
	*status = rx_status;
	return 0;

free_out:
	return ret;
}
2523
2524 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2525                                                   struct napi_struct *napi,
2526                                                   struct sk_buff_head *msdu_list,
2527                                                   int *quota, int ring_id)
2528 {
2529         struct ath11k_skb_rxcb *rxcb;
2530         struct sk_buff *msdu;
2531         struct ath11k *ar;
2532         u8 mac_id;
2533         int ret;
2534
2535         if (skb_queue_empty(msdu_list))
2536                 return;
2537
2538         rcu_read_lock();
2539
2540         while (*quota && (msdu = __skb_dequeue(msdu_list))) {
2541                 rxcb = ATH11K_SKB_RXCB(msdu);
2542                 mac_id = rxcb->mac_id;
2543                 ar = ab->pdevs[mac_id].ar;
2544                 if (!rcu_dereference(ab->pdevs_active[mac_id])) {
2545                         dev_kfree_skb_any(msdu);
2546                         continue;
2547                 }
2548
2549                 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
2550                         dev_kfree_skb_any(msdu);
2551                         continue;
2552                 }
2553
2554                 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list);
2555                 if (ret) {
2556                         ath11k_dbg(ab, ATH11K_DBG_DATA,
2557                                    "Unable to process msdu %d", ret);
2558                         dev_kfree_skb_any(msdu);
2559                         continue;
2560                 }
2561
2562                 ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
2563                 (*quota)--;
2564         }
2565
2566         rcu_read_unlock();
2567 }
2568
/* NAPI poll handler for one REO destination ring.
 *
 * Reaps up to @budget completed rx descriptors from the ring, unmaps and
 * collects the corresponding skbs, replenishes the per-pdev rxdma refill
 * rings with the number of buffers consumed, then processes and delivers
 * the collected msdus. Returns the number of msdus handled
 * (budget - remaining quota).
 */
int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
			 struct napi_struct *napi, int budget)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	struct sk_buff_head msdu_list;
	struct ath11k_skb_rxcb *rxcb;
	int total_msdu_reaped = 0;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	int quota = budget;
	bool done = false;
	int buf_id, mac_id;
	struct ath11k *ar;
	u32 *rx_desc;
	int i;

	__skb_queue_head_init(&msdu_list);

	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

try_again:
	while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		/* Copy the descriptor out of the ring before parsing it */
		struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc;
		enum hal_reo_dest_ring_push_reason push_reason;
		u32 cookie;

		/* The sw cookie encodes the pdev id and the idr buf id of
		 * the skb posted to the rxdma refill ring.
		 */
		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				   desc.buf_addr_info.info1);
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
				   cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;
		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
				    buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;
		total_msdu_reaped++;

		/* Entries pushed for reasons other than a routing
		 * instruction (e.g. errors) are dropped here.
		 */
		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
					desc.info0);
		if (push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			dev_kfree_skb_any(msdu);
			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
			continue;
		}

		rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 &
					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 &
					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		rxcb->mac_id = mac_id;
		rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
				      desc.info0);

		__skb_queue_tail(&msdu_list, msdu);

		/* Stop at the quota, but never in the middle of a
		 * multi-buffer msdu (continuation set).
		 */
		if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
			done = true;
			break;
		}
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
		ath11k_hal_srng_access_end(ab, srng);
		goto try_again;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_msdu_reaped)
		goto exit;

	/* Refill each pdev's rxdma ring with as many buffers as we took */
	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM);
	}

	ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
					      &quota, ring_id);

exit:
	return budget - quota;
}
2691
/* Fold the per-PPDU monitor info in @ppdu_info into the cumulative rx
 * statistics of @arsta. No-op when per-peer rx stats are not allocated.
 * msdu counts are attributed to the nss/mcs/gi/bw/... buckets of the
 * PPDU they arrived in.
 */
static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
					   struct hal_rx_mon_ppdu_info *ppdu_info)
{
	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
	u32 num_msdu;

	if (!rx_stats)
		return;

	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;

	rx_stats->num_msdu += num_msdu;
	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
				    ppdu_info->tcp_ack_msdu_count;
	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;

	/* 11a/11b PPDUs carry no meaningful nss/mcs/tid; normalize them so
	 * the bucket updates below fall into the "invalid" slots.
	 */
	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
		ppdu_info->nss = 1;
		ppdu_info->mcs = HAL_RX_MAX_MCS;
		ppdu_info->tid = IEEE80211_NUM_TIDS;
	}

	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;

	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;

	if (ppdu_info->gi < HAL_RX_GI_MAX)
		rx_stats->gi_count[ppdu_info->gi] += num_msdu;

	if (ppdu_info->bw < HAL_RX_BW_MAX)
		rx_stats->bw_count[ppdu_info->bw] += num_msdu;

	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;

	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
		rx_stats->tid_count[ppdu_info->tid] += num_msdu;

	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;

	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;

	if (ppdu_info->is_stbc)
		rx_stats->stbc_count += num_msdu;

	if (ppdu_info->beamformed)
		rx_stats->beamformed_count += num_msdu;

	/* More than one FCS-ok mpdu in the PPDU implies A-MPDU aggregation */
	if (ppdu_info->num_mpdu_fcs_ok > 1)
		rx_stats->ampdu_msdu_count += num_msdu;
	else
		rx_stats->non_ampdu_msdu_count += num_msdu;

	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
	rx_stats->dcm_count += ppdu_info->dcm;
	/* NOTE(review): unlike the counters above, ru_alloc is used as an
	 * index without a bounds check - assumed to always be within
	 * ru_alloc_cnt[]; verify against the tlv parser.
	 */
	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;

	arsta->rssi_comb = ppdu_info->rssi_comb;
	rx_stats->rx_duration += ppdu_info->rx_duration;
	arsta->rx_duration = rx_stats->rx_duration;
}
2761
2762 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2763                                                          struct dp_rxdma_ring *rx_ring,
2764                                                          int *buf_id)
2765 {
2766         struct sk_buff *skb;
2767         dma_addr_t paddr;
2768
2769         skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2770                             DP_RX_BUFFER_ALIGN_SIZE);
2771
2772         if (!skb)
2773                 goto fail_alloc_skb;
2774
2775         if (!IS_ALIGNED((unsigned long)skb->data,
2776                         DP_RX_BUFFER_ALIGN_SIZE)) {
2777                 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2778                          skb->data);
2779         }
2780
2781         paddr = dma_map_single(ab->dev, skb->data,
2782                                skb->len + skb_tailroom(skb),
2783                                DMA_FROM_DEVICE);
2784         if (unlikely(dma_mapping_error(ab->dev, paddr)))
2785                 goto fail_free_skb;
2786
2787         spin_lock_bh(&rx_ring->idr_lock);
2788         *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2789                             rx_ring->bufs_max, GFP_ATOMIC);
2790         spin_unlock_bh(&rx_ring->idr_lock);
2791         if (*buf_id < 0)
2792                 goto fail_dma_unmap;
2793
2794         ATH11K_SKB_RXCB(skb)->paddr = paddr;
2795         return skb;
2796
2797 fail_dma_unmap:
2798         dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2799                          DMA_FROM_DEVICE);
2800 fail_free_skb:
2801         dev_kfree_skb_any(skb);
2802 fail_alloc_skb:
2803         return NULL;
2804 }
2805
/* Post up to @req_entries freshly allocated monitor status buffers to
 * the rx monitor status refill ring.
 *
 * The request is clamped to the ring capacity and the number of free
 * source-ring slots. Returns the number of buffers actually posted.
 */
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
					   struct dp_rxdma_ring *rx_ring,
					   int req_entries,
					   enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id);
		if (!skb)
			break;
		paddr = ATH11K_SKB_RXCB(skb)->paddr;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_desc_get;

		/* Cookie encodes pdev id + idr buf id so the reap path can
		 * look the skb up again.
		 */
		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_desc_get:
	/* Undo the allocation for the buffer that found no ring slot */
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}
2870
/* Reap completed monitor status buffers for @mac_id.
 *
 * For each ring entry (up to *budget): if it carries a buffer address,
 * the matching skb is looked up, unmapped and - provided the
 * HAL_RX_STATUS_BUFFER_DONE tlv tag is set - queued onto @skb_list.
 * Every visited entry is then re-armed with a freshly allocated buffer.
 * Decrements *budget per iteration and returns the number of ring
 * entries processed.
 */
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
					     int *budget, struct sk_buff_head *skb_list)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_srng *srng;
	void *rx_mon_status_desc;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_tlv_hdr *tlv;
	u32 cookie;
	int buf_id, srng_id;
	dma_addr_t paddr;
	u8 rbm;
	int num_buffs_reaped = 0;

	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
	dp = &ar->dp;
	srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);
	while (*budget) {
		*budget -= 1;
		/* Peek (don't advance yet) so the same entry can be
		 * re-armed below before moving on.
		 */
		rx_mon_status_desc =
			ath11k_hal_srng_src_peek(ab, srng);
		if (!rx_mon_status_desc)
			break;

		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
						&cookie, &rbm);
		if (paddr) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				goto move_next;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);

			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

			/* Hw marks a fully written status buffer with the
			 * BUFFER_DONE tlv tag; anything else is dropped.
			 */
			tlv = (struct hal_tlv_hdr *)skb->data;
			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
					HAL_RX_STATUS_BUFFER_DONE) {
				ath11k_warn(ab, "mon status DONE not set %lx\n",
					    FIELD_GET(HAL_TLV_HDR_TAG,
						      tlv->tl));
				dev_kfree_skb_any(skb);
				goto move_next;
			}

			__skb_queue_tail(skb_list, skb);
		}
move_next:
		/* Re-arm this entry with a new buffer; on allocation
		 * failure post a null address and stop reaping.
		 */
		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id);

		if (!skb) {
			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
							HAL_RX_BUF_RBM_SW3_BM);
			num_buffs_reaped++;
			break;
		}
		rxcb = ATH11K_SKB_RXCB(skb);

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
						cookie, HAL_RX_BUF_RBM_SW3_BM);
		ath11k_hal_srng_src_get_next_entry(ab, srng);
		num_buffs_reaped++;
	}
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return num_buffs_reaped;
}
2965
/* NAPI handler for the rx monitor status ring of @mac_id.
 *
 * Reaps completed status buffers, parses the ppdu info tlvs from each
 * and folds the result into the matching peer's rx statistics. All
 * status skbs are consumed (freed) here. Returns the number of buffers
 * reaped from the ring.
 */
int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
	enum hal_rx_mon_status hal_status;
	struct sk_buff *skb;
	struct sk_buff_head skb_list;
	struct hal_rx_mon_ppdu_info ppdu_info;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	int num_buffs_reaped = 0;

	__skb_queue_head_init(&skb_list);

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
							     &skb_list);
	if (!num_buffs_reaped)
		goto exit;

	while ((skb = __skb_dequeue(&skb_list))) {
		memset(&ppdu_info, 0, sizeof(ppdu_info));
		ppdu_info.peer_id = HAL_INVALID_PEERID;

		if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar))
			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);

		hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);

		/* Stats are only updated on a complete PPDU with a valid
		 * peer id; anything else is discarded.
		 */
		if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
			dev_kfree_skb_any(skb);
			continue;
		}

		rcu_read_lock();
		spin_lock_bh(&ab->base_lock);
		peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);

		if (!peer || !peer->sta) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "failed to find the peer with peer_id %d\n",
				   ppdu_info.peer_id);
			spin_unlock_bh(&ab->base_lock);
			rcu_read_unlock();
			dev_kfree_skb_any(skb);
			continue;
		}

		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
		ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);

		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);

		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();

		dev_kfree_skb_any(skb);
	}
exit:
	return num_buffs_reaped;
}
3028
3029 static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
3030 {
3031         struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
3032
3033         spin_lock_bh(&rx_tid->ab->base_lock);
3034         if (rx_tid->last_frag_no &&
3035             rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
3036                 spin_unlock_bh(&rx_tid->ab->base_lock);
3037                 return;
3038         }
3039         ath11k_dp_rx_frags_cleanup(rx_tid, true);
3040         spin_unlock_bh(&rx_tid->ab->base_lock);
3041 }
3042
3043 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
3044 {
3045         struct ath11k_base *ab = ar->ab;
3046         struct crypto_shash *tfm;
3047         struct ath11k_peer *peer;
3048         struct dp_rx_tid *rx_tid;
3049         int i;
3050
3051         tfm = crypto_alloc_shash("michael_mic", 0, 0);
3052         if (IS_ERR(tfm))
3053                 return PTR_ERR(tfm);
3054
3055         spin_lock_bh(&ab->base_lock);
3056
3057         peer = ath11k_peer_find(ab, vdev_id, peer_mac);
3058         if (!peer) {
3059                 ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
3060                 spin_unlock_bh(&ab->base_lock);
3061                 return -ENOENT;
3062         }
3063
3064         for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3065                 rx_tid = &peer->rx_tid[i];
3066                 rx_tid->ab = ab;
3067                 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
3068                 skb_queue_head_init(&rx_tid->rx_frags);
3069         }
3070
3071         peer->tfm_mmic = tfm;
3072         spin_unlock_bh(&ab->base_lock);
3073
3074         return 0;
3075 }
3076
3077 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
3078                                       struct ieee80211_hdr *hdr, u8 *data,
3079                                       size_t data_len, u8 *mic)
3080 {
3081         SHASH_DESC_ON_STACK(desc, tfm);
3082         u8 mic_hdr[16] = {0};
3083         u8 tid = 0;
3084         int ret;
3085
3086         if (!tfm)
3087                 return -EINVAL;
3088
3089         desc->tfm = tfm;
3090
3091         ret = crypto_shash_setkey(tfm, key, 8);
3092         if (ret)
3093                 goto out;
3094
3095         ret = crypto_shash_init(desc);
3096         if (ret)
3097                 goto out;
3098
3099         /* TKIP MIC header */
3100         memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3101         memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3102         if (ieee80211_is_data_qos(hdr->frame_control))
3103                 tid = ieee80211_get_tid(hdr);
3104         mic_hdr[12] = tid;
3105
3106         ret = crypto_shash_update(desc, mic_hdr, 16);
3107         if (ret)
3108                 goto out;
3109         ret = crypto_shash_update(desc, data, data_len);
3110         if (ret)
3111                 goto out;
3112         ret = crypto_shash_final(desc, mic);
3113 out:
3114         shash_desc_zero(desc);
3115         return ret;
3116 }
3117
3118 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
3119                                           struct sk_buff *msdu)
3120 {
3121         struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3122         struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3123         struct ieee80211_key_conf *key_conf;
3124         struct ieee80211_hdr *hdr;
3125         u8 mic[IEEE80211_CCMP_MIC_LEN];
3126         int head_len, tail_len, ret;
3127         size_t data_len;
3128         u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3129         u8 *key, *data;
3130         u8 key_idx;
3131
3132         if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
3133             HAL_ENCRYPT_TYPE_TKIP_MIC)
3134                 return 0;
3135
3136         hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3137         hdr_len = ieee80211_hdrlen(hdr->frame_control);
3138         head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
3139         tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
3140
3141         if (!is_multicast_ether_addr(hdr->addr1))
3142                 key_idx = peer->ucast_keyidx;
3143         else
3144                 key_idx = peer->mcast_keyidx;
3145
3146         key_conf = peer->keys[key_idx];
3147
3148         data = msdu->data + head_len;
3149         data_len = msdu->len - head_len - tail_len;
3150         key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
3151
3152         ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
3153         if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
3154                 goto mic_fail;
3155
3156         return 0;
3157
3158 mic_fail:
3159         (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
3160         (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
3161
3162         rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
3163                     RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
3164         skb_pull(msdu, hal_rx_desc_sz);
3165
3166         ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
3167         ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
3168                                HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
3169         ieee80211_rx(ar->hw, msdu);
3170         return -EINVAL;
3171 }
3172
3173 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
3174                                         enum hal_encrypt_type enctype, u32 flags)
3175 {
3176         struct ieee80211_hdr *hdr;
3177         size_t hdr_len;
3178         size_t crypto_len;
3179         u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3180
3181         if (!flags)
3182                 return;
3183
3184         hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3185
3186         if (flags & RX_FLAG_MIC_STRIPPED)
3187                 skb_trim(msdu, msdu->len -
3188                          ath11k_dp_rx_crypto_mic_len(ar, enctype));
3189
3190         if (flags & RX_FLAG_ICV_STRIPPED)
3191                 skb_trim(msdu, msdu->len -
3192                          ath11k_dp_rx_crypto_icv_len(ar, enctype));
3193
3194         if (flags & RX_FLAG_IV_STRIPPED) {
3195                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
3196                 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
3197
3198                 memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
3199                         (void *)msdu->data + hal_rx_desc_sz, hdr_len);
3200                 skb_pull(msdu, crypto_len);
3201         }
3202 }
3203
/* Reassemble the fragments queued on @rx_tid into a single MSDU.
 *
 * Walks the (already sorted) fragment list and, per fragment, strips the
 * crypto IV/ICV/MIC the hardware left behind after decryption, drops the
 * FCS of every fragment but the last, and drops the rx descriptor plus
 * 802.11 header of every fragment but the first; remaining payloads are
 * then appended to the first fragment.
 *
 * On return *defrag_skb holds the reassembled frame, or NULL when TKIP
 * MIC verification failed (in that case the frame was already handed to
 * mac80211 as a MIC failure by ath11k_dp_rx_h_verify_tkip_mic()).
 *
 * Returns 0 on success, -ENOMEM if the first fragment's buffer cannot be
 * grown to hold the complete MSDU.
 */
static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
				 struct ath11k_peer *peer,
				 struct dp_rx_tid *rx_tid,
				 struct sk_buff **defrag_skb)
{
	struct hal_rx_desc *rx_desc;
	struct sk_buff *skb, *first_frag, *last_frag;
	struct ieee80211_hdr *hdr;
	struct rx_attention *rx_attention;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	int msdu_len = 0;
	int extra_space;
	u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	first_frag = skb_peek(&rx_tid->rx_frags);
	last_frag = skb_peek_tail(&rx_tid->rx_frags);

	skb_queue_walk(&rx_tid->rx_frags, skb) {
		flags = 0;
		rx_desc = (struct hal_rx_desc *)skb->data;
		hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);

		enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
		if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
			rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
			is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
		}

		if (is_decrypted) {
			/* Keep the IV only on the first fragment and the
			 * MIC/ICV only on the last, so the merged frame
			 * carries exactly one copy of each.
			 */
			if (skb != first_frag)
				flags |=  RX_FLAG_IV_STRIPPED;
			if (skb != last_frag)
				flags |= RX_FLAG_ICV_STRIPPED |
					 RX_FLAG_MIC_STRIPPED;
		}

		/* RX fragments are always raw packets */
		if (skb != last_frag)
			skb_trim(skb, skb->len - FCS_LEN);
		ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);

		/* Only the first fragment keeps its rx descriptor and
		 * 802.11 header; the rest contribute payload only.
		 */
		if (skb != first_frag)
			skb_pull(skb, hal_rx_desc_sz +
				      ieee80211_hdrlen(hdr->frame_control));
		msdu_len += skb->len;
	}

	/* Grow the first fragment's buffer if the merged payload will not
	 * fit in the tailroom of a standard rx buffer.
	 */
	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
	if (extra_space > 0 &&
	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
		return -ENOMEM;

	__skb_unlink(first_frag, &rx_tid->rx_frags);
	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
		skb_put_data(first_frag, skb->data, skb->len);
		dev_kfree_skb_any(skb);
	}

	/* The reassembled frame is no longer fragmented */
	hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
	ATH11K_SKB_RXCB(first_frag)->is_frag = 1;

	if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
		first_frag = NULL;

	*defrag_skb = first_frag;
	return 0;
}
3273
/* Hand a reassembled MSDU back to the hardware by reinjecting it through
 * the REO reinject ring, so it goes through normal REO ordering/routing
 * as if it had been received unfragmented.
 *
 * Reuses the link descriptor saved from the first fragment's REO dest
 * ring entry (rx_tid->dst_ring_desc): its first msdu slot is rewritten
 * to describe the merged frame, the skb is DMA-mapped and registered in
 * the rx refill idr, and a REO entrance ring descriptor is queued.
 *
 * Returns 0 on success, -ENOMEM on DMA-map/idr failure, -ENOSPC when no
 * entrance ring entry is available.  On failure the skb is left owned by
 * the caller (mapping/idr state is rolled back here).
 */
static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
					      struct sk_buff *defrag_skb)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
	struct hal_reo_entrance_ring *reo_ent_ring;
	struct hal_reo_dest_ring *reo_dest_ring;
	struct dp_link_desc_bank *link_desc_banks;
	struct hal_rx_msdu_link *msdu_link;
	struct hal_rx_msdu_details *msdu0;
	struct hal_srng *srng;
	dma_addr_t paddr;
	u32 desc_bank, msdu_info, mpdu_info;
	u32 dst_idx, cookie, hal_rx_desc_sz;
	int ret, buf_id;

	hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
	link_desc_banks = ab->dp.link_desc_banks;
	reo_dest_ring = rx_tid->dst_ring_desc;

	/* Locate the CPU address of the saved link descriptor and reuse
	 * its first msdu slot for the reassembled frame.
	 */
	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
			(paddr - link_desc_banks[desc_bank].paddr));
	msdu0 = &msdu_link->msdu_link[0];
	/* Preserve the original REO destination before clearing the slot */
	dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
	memset(msdu0, 0, sizeof(*msdu0));

	msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
			       defrag_skb->len - hal_rx_desc_sz) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
	msdu0->rx_msdu_info.info0 = msdu_info;

	/* change msdu len in hal rx desc */
	ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);

	paddr = dma_map_single(ab->dev, defrag_skb->data,
			       defrag_skb->len + skb_tailroom(defrag_skb),
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(ab->dev, paddr))
		return -ENOMEM;

	/* Register the buffer so the normal rx completion path can find
	 * it by cookie once the hardware hands it back.
	 */
	spin_lock_bh(&rx_refill_ring->idr_lock);
	buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
			   rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
	spin_unlock_bh(&rx_refill_ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOMEM;
		goto err_unmap_dma;
	}

	ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM);

	/* Fill mpdu details into reo entrance ring */
	srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];

	spin_lock_bh(&srng->lock);
	ath11k_hal_srng_access_begin(ab, srng);

	reo_ent_ring = (struct hal_reo_entrance_ring *)
			ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!reo_ent_ring) {
		ath11k_hal_srng_access_end(ab, srng);
		spin_unlock_bh(&srng->lock);
		ret = -ENOSPC;
		goto err_free_idr;
	}
	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));

	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
	ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);

	/* Single-MSDU, raw MPDU with a valid PN carried from the original
	 * fragment sequence.
	 */
	mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);

	reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
	reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
	reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
	reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
					 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
						   reo_dest_ring->info0)) |
			      FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return 0;

err_free_idr:
	spin_lock_bh(&rx_refill_ring->idr_lock);
	idr_remove(&rx_refill_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_refill_ring->idr_lock);
err_unmap_dma:
	dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
			 DMA_FROM_DEVICE);
	return ret;
}
3386
3387 static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
3388                                     struct sk_buff *a, struct sk_buff *b)
3389 {
3390         int frag1, frag2;
3391
3392         frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
3393         frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
3394
3395         return frag1 - frag2;
3396 }
3397
3398 static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
3399                                       struct sk_buff_head *frag_list,
3400                                       struct sk_buff *cur_frag)
3401 {
3402         struct sk_buff *skb;
3403         int cmp;
3404
3405         skb_queue_walk(frag_list, skb) {
3406                 cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
3407                 if (cmp < 0)
3408                         continue;
3409                 __skb_queue_before(frag_list, skb, cur_frag);
3410                 return;
3411         }
3412         __skb_queue_tail(frag_list, cur_frag);
3413 }
3414
3415 static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
3416 {
3417         struct ieee80211_hdr *hdr;
3418         u64 pn = 0;
3419         u8 *ehdr;
3420         u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3421
3422         hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3423         ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3424
3425         pn = ehdr[0];
3426         pn |= (u64)ehdr[1] << 8;
3427         pn |= (u64)ehdr[4] << 16;
3428         pn |= (u64)ehdr[5] << 24;
3429         pn |= (u64)ehdr[6] << 32;
3430         pn |= (u64)ehdr[7] << 40;
3431
3432         return pn;
3433 }
3434
3435 static bool
3436 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3437 {
3438         enum hal_encrypt_type encrypt_type;
3439         struct sk_buff *first_frag, *skb;
3440         struct hal_rx_desc *desc;
3441         u64 last_pn;
3442         u64 cur_pn;
3443
3444         first_frag = skb_peek(&rx_tid->rx_frags);
3445         desc = (struct hal_rx_desc *)first_frag->data;
3446
3447         encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
3448         if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3449             encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3450             encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3451             encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3452                 return true;
3453
3454         last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
3455         skb_queue_walk(&rx_tid->rx_frags, skb) {
3456                 if (skb == first_frag)
3457                         continue;
3458
3459                 cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
3460                 if (cur_pn != last_pn + 1)
3461                         return false;
3462                 last_pn = cur_pn;
3463         }
3464         return true;
3465 }
3466
/* Handle one received fragment: validate it, queue it in the per-TID
 * reassembly state and, once every fragment of the sequence has arrived,
 * verify PN continuity, reassemble the MSDU and reinject it via the REO
 * reinject ring.
 *
 * Returns 0 on success (including "stored, waiting for more fragments"),
 * or a negative error for invalid/duplicate fragments, missing peer, or
 * allocation failure.
 */
static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
				    struct sk_buff *msdu,
				    u32 *ring_desc)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	struct sk_buff *defrag_skb = NULL;
	u32 peer_id;
	u16 seqno, frag_no;
	u8 tid;
	int ret = 0;
	bool more_frags;
	bool is_mcbc;

	rx_desc = (struct hal_rx_desc *)msdu->data;
	peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
	tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
	seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
	frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
	more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
	is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);

	/* Multicast/Broadcast fragments are not expected */
	if (is_mcbc)
		return -EINVAL;

	if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
	    !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
	    tid > IEEE80211_NUM_TIDS)
		return -EINVAL;

	/* received unfragmented packet in reo
	 * exception ring, this shouldn't happen
	 * as these packets typically come from
	 * reo2sw srngs.
	 */
	if (WARN_ON_ONCE(!frag_no && !more_frags))
		return -EINVAL;

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, peer_id);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
			    peer_id);
		ret = -ENOENT;
		goto out_unlock;
	}
	rx_tid = &peer->rx_tid[tid];

	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
	    skb_queue_empty(&rx_tid->rx_frags)) {
		/* Flush stored fragments and start a new sequence */
		ath11k_dp_rx_frags_cleanup(rx_tid, true);
		rx_tid->cur_sn = seqno;
	}

	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
		/* Fragment already present */
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Keep the fragment list sorted by fragment number.
	 * NOTE(review): __fls() is undefined for 0; when the bitmap is
	 * still empty the queue is also empty so either branch queues at
	 * the tail — confirm this invariant is what makes it safe.
	 */
	if (frag_no > __fls(rx_tid->rx_frag_bitmap))
		__skb_queue_tail(&rx_tid->rx_frags, msdu);
	else
		ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);

	rx_tid->rx_frag_bitmap |= BIT(frag_no);
	if (!more_frags)
		rx_tid->last_frag_no = frag_no;

	if (frag_no == 0) {
		/* First fragment: keep a copy of its REO descriptor for
		 * the later reinject of the reassembled frame.
		 */
		rx_tid->dst_ring_desc = kmemdup(ring_desc,
						sizeof(*rx_tid->dst_ring_desc),
						GFP_ATOMIC);
		if (!rx_tid->dst_ring_desc) {
			ret = -ENOMEM;
			goto out_unlock;
		}
	} else {
		ath11k_dp_rx_link_desc_return(ab, ring_desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	/* Not all fragments seen yet: (re)arm the reassembly timeout */
	if (!rx_tid->last_frag_no ||
	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
		mod_timer(&rx_tid->frag_timer, jiffies +
					       ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
		goto out_unlock;
	}

	/* All fragments received.  The timer callback takes base_lock, so
	 * the lock must be dropped around del_timer_sync() to avoid
	 * deadlocking against a concurrently running timeout handler.
	 */
	spin_unlock_bh(&ab->base_lock);
	del_timer_sync(&rx_tid->frag_timer);
	spin_lock_bh(&ab->base_lock);

	/* The peer may have been deleted while the lock was dropped */
	peer = ath11k_peer_find_by_id(ab, peer_id);
	if (!peer)
		goto err_frags_cleanup;

	if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
		goto err_frags_cleanup;

	if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
		goto err_frags_cleanup;

	/* defrag_skb is NULL when TKIP MIC verification failed */
	if (!defrag_skb)
		goto err_frags_cleanup;

	if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
		goto err_frags_cleanup;

	ath11k_dp_rx_frags_cleanup(rx_tid, false);
	goto out_unlock;

err_frags_cleanup:
	dev_kfree_skb_any(defrag_skb);
	ath11k_dp_rx_frags_cleanup(rx_tid, true);
out_unlock:
	spin_unlock_bh(&ab->base_lock);
	return ret;
}
3590
/* Reap one errored rx buffer (identified by @buf_id) from the refill
 * ring idr and either drop it or feed it into fragment reassembly.
 *
 * The buffer is always removed from the idr and DMA-unmapped; @drop
 * requests an unconditional free (used when the caller already returned
 * the link descriptor).  Returns 0 when the buffer was consumed (even if
 * freed), -EINVAL when @buf_id does not resolve to a buffer.
 */
static int
ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	struct sk_buff *msdu;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_rx_desc *rx_desc;
	u8 *hdr_status;
	u16 msdu_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	spin_lock_bh(&rx_ring->idr_lock);
	msdu = idr_find(&rx_ring->bufs_idr, buf_id);
	if (!msdu) {
		ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
			    buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);
		return -EINVAL;
	}

	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);

	rxcb = ATH11K_SKB_RXCB(msdu);
	dma_unmap_single(ar->ab->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);

	if (drop) {
		dev_kfree_skb_any(msdu);
		return 0;
	}

	/* Drop frames for pdevs that are going down or doing CAC */
	rcu_read_lock();
	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	/* Sanity-check the hw-reported length against the buffer size
	 * before skb_put(); dump the descriptor when it is bogus.
	 */
	rx_desc = (struct hal_rx_desc *)msdu->data;
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
		hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
		ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
				sizeof(struct ieee80211_hdr));
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
				sizeof(struct hal_rx_desc));
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	skb_put(msdu, hal_rx_desc_sz + msdu_len);

	/* On reassembly failure the link descriptor must be returned to
	 * the WBM idle list ourselves.
	 */
	if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
		dev_kfree_skb_any(msdu);
		ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}
exit:
	rcu_read_unlock();
	return 0;
}
3660
/* NAPI handler for the REO exception ring.
 *
 * Parses each error descriptor, resolves the link descriptor holding the
 * affected MSDU cookies, and processes rx fragments (everything else is
 * dropped).  Reaped buffers are replenished per-radio at the end.
 *
 * Returns the number of buffers reaped (bounded by @budget).
 */
int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
			     int budget)
{
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	struct dp_link_desc_bank *link_desc_banks;
	enum hal_rx_buf_return_buf_manager rbm;
	int tot_n_bufs_reaped, quota, ret, i;
	int n_bufs_reaped[MAX_RADIOS] = {0};
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	u32 desc_bank, num_msdus;
	struct hal_srng *srng;
	struct ath11k_dp *dp;
	void *link_desc_va;
	int buf_id, mac_id;
	struct ath11k *ar;
	dma_addr_t paddr;
	u32 *desc;
	bool is_frag;
	u8 drop = 0;

	tot_n_bufs_reaped = 0;
	quota = budget;

	dp = &ab->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &ab->hal.srng_list[reo_except->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;

		ab->soc_stats.err_ring_pkts++;
		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
						    &desc_bank);
		if (ret) {
			ath11k_warn(ab, "failed to parse error reo desc %d\n",
				    ret);
			continue;
		}
		/* Translate the link descriptor's bus address into the
		 * CPU address within its bank.
		 */
		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
						 &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
			ab->soc_stats.invalid_rbm++;
			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Process only rx fragments with one msdu per link desc below, and drop
		 * msdu's indicated due to error reasons.
		 * NOTE(review): drop is declared outside the loop and is
		 * never reset, so once set it applies to all subsequent
		 * descriptors in this poll — confirm this is intended.
		 */
		if (!is_frag || num_msdus > 1) {
			drop = 1;
			/* Return the link desc back to wbm idle list */
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);

			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
					   msdu_cookies[i]);

			ar = ab->pdevs[mac_id].ar;

			if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
				n_bufs_reaped[mac_id]++;
				tot_n_bufs_reaped++;
			}
		}

		if (tot_n_bufs_reaped >= quota) {
			tot_n_bufs_reaped = quota;
			goto exit;
		}

		budget = quota - tot_n_bufs_reaped;
	}

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	/* Refill the rx buffer rings with as many buffers as were reaped
	 * from each radio.
	 */
	for (i = 0; i <  ab->num_radios; i++) {
		if (!n_bufs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM);
	}

	return tot_n_bufs_reaped;
}
3773
3774 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3775                                              int msdu_len,
3776                                              struct sk_buff_head *msdu_list)
3777 {
3778         struct sk_buff *skb, *tmp;
3779         struct ath11k_skb_rxcb *rxcb;
3780         int n_buffs;
3781
3782         n_buffs = DIV_ROUND_UP(msdu_len,
3783                                (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
3784
3785         skb_queue_walk_safe(msdu_list, skb, tmp) {
3786                 rxcb = ATH11K_SKB_RXCB(skb);
3787                 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3788                     rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3789                         if (!n_buffs)
3790                                 break;
3791                         __skb_unlink(skb, msdu_list);
3792                         dev_kfree_skb_any(skb);
3793                         n_buffs--;
3794                 }
3795         }
3796 }
3797
/* Recover an MSDU that hit a REO NULL-queue-descriptor error so it can
 * still be delivered to mac80211 (ppdu/mpdu status is filled into
 * @status by the rx helpers).
 *
 * Returns 0 when the msdu was prepared for delivery, -EINVAL for frames
 * that do not fit a single rx buffer, -EIO when the descriptor is not
 * marked done (remaining msdus in @msdu_list are purged in that case).
 */
static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
				      struct ieee80211_rx_status *status,
				      struct sk_buff_head *msdu_list)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	struct rx_attention *rx_attention;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);

	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
		/* First buffer will be freed by the caller, so deduct it's length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
		ath11k_warn(ar->ab,
			    "msdu_done bit not set in null_q_des processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out a missing
	 * REO queue for a given peer or a given TID. This typically
	 * may happen if a packet is received on a QOS enabled TID before the
	 * ADDBA negotiation for that TID, when the TID queue is setup. Or
	 * it may also happen for MC/BC frames if they are not routed to the
	 * non-QOS TID queue, in the absence of any other default TID queue.
	 * This error can show up both in a REO destination or WBM release ring.
	 */

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);

	if (rxcb->is_frag) {
		/* Fragments already carry their payload at the right
		 * offset; only the rx descriptor needs to be stripped.
		 */
		skb_pull(msdu, hal_rx_desc_sz);
	} else {
		l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);

		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
	}
	ath11k_dp_rx_h_ppdu(ar, desc, status);

	ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);

	rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);

	/* Please note that caller will having the access to msdu and completing
	 * rx with mac80211. Need not worry about cleaning up amsdu_list.
	 */

	return 0;
}
3861
3862 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
3863                                    struct ieee80211_rx_status *status,
3864                                    struct sk_buff_head *msdu_list)
3865 {
3866         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3867         bool drop = false;
3868
3869         ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3870
3871         switch (rxcb->err_code) {
3872         case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3873                 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3874                         drop = true;
3875                 break;
3876         case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3877                 /* TODO: Do not drop PN failed packets in the driver;
3878                  * instead, it is good to drop such packets in mac80211
3879                  * after incrementing the replay counters.
3880                  */
3881                 fallthrough;
3882         default:
3883                 /* TODO: Review other errors and process them to mac80211
3884                  * as appropriate.
3885                  */
3886                 drop = true;
3887                 break;
3888         }
3889
3890         return drop;
3891 }
3892
/* Prepare an msdu that failed the TKIP MIC check for delivery to mac80211,
 * which performs the Michael MIC countermeasures. The payload is trimmed,
 * rx status is filled and MMIC error flags are set before undecap.
 */
static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
					struct ieee80211_rx_status *status)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);

	/* Strip the hw descriptor and l3 padding so msdu->data points at
	 * the 802.11 payload of msdu_len bytes.
	 */
	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);

	ath11k_dp_rx_h_ppdu(ar, desc, status);

	/* Tell mac80211 the frame was decrypted but its Michael MIC failed
	 * and the MIC has already been stripped by hw.
	 */
	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
			 RX_FLAG_DECRYPTED);

	ath11k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}
3918
3919 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar,  struct sk_buff *msdu,
3920                                      struct ieee80211_rx_status *status)
3921 {
3922         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3923         bool drop = false;
3924
3925         ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
3926
3927         switch (rxcb->err_code) {
3928         case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3929                 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
3930                 break;
3931         default:
3932                 /* TODO: Review other rxdma error code to check if anything is
3933                  * worth reporting to mac80211
3934                  */
3935                 drop = true;
3936                 break;
3937         }
3938
3939         return drop;
3940 }
3941
3942 static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
3943                                  struct napi_struct *napi,
3944                                  struct sk_buff *msdu,
3945                                  struct sk_buff_head *msdu_list)
3946 {
3947         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3948         struct ieee80211_rx_status rxs = {0};
3949         struct ieee80211_rx_status *status;
3950         bool drop = true;
3951
3952         switch (rxcb->err_rel_src) {
3953         case HAL_WBM_REL_SRC_MODULE_REO:
3954                 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
3955                 break;
3956         case HAL_WBM_REL_SRC_MODULE_RXDMA:
3957                 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
3958                 break;
3959         default:
3960                 /* msdu will get freed */
3961                 break;
3962         }
3963
3964         if (drop) {
3965                 dev_kfree_skb_any(msdu);
3966                 return;
3967         }
3968
3969         status = IEEE80211_SKB_RXCB(msdu);
3970         *status = rxs;
3971
3972         ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
3973 }
3974
/* NAPI handler for the WBM error release ring.
 *
 * Reaps up to @budget buffers from the shared release ring, sorts them into
 * per-pdev lists, replenishes the refill rings, and then processes each
 * errored msdu outside the srng lock. Returns the number of buffers reaped.
 */
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct ath11k *ar;
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list[MAX_RADIOS];
	struct ath11k_skb_rxcb *rxcb;
	u32 *rx_desc;
	int buf_id, mac_id;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	int total_num_buffs_reaped = 0;
	int ret, i;

	/* One staging list per radio; entries are processed after the ring
	 * lock is released.
	 */
	for (i = 0; i < ab->num_radios; i++)
		__skb_queue_head_init(&msdu_list[i]);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath11k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		/* The cookie encodes which pdev owns the buffer and its
		 * index in that pdev's refill-ring IDR.
		 */
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
				    buf_id, mac_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;
		total_num_buffs_reaped++;
		budget--;

		/* Only error-detected releases carry a frame worth looking
		 * at; anything else is just a buffer to recycle.
		 */
		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		/* Stash error source/code in the skb cb for the per-msdu
		 * handler run after the ring is released.
		 */
		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
		__skb_queue_tail(&msdu_list[mac_id], msdu);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_num_buffs_reaped)
		goto done;

	/* Give back as many buffers as were reaped from each pdev. */
	for (i = 0; i <  ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM);
	}

	rcu_read_lock();
	for (i = 0; i <  ab->num_radios; i++) {
		/* Drop everything queued for a pdev that went away. */
		if (!rcu_dereference(ab->pdevs_active[i])) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		ar = ab->pdevs[i].ar;

		/* No rx delivery while CAC (radar detection) is running. */
		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
	}
	rcu_read_unlock();
done:
	return total_num_buffs_reaped;
}
4092
/* Drain the per-pdev RXDMA error destination ring.
 *
 * Each ring entry points at a link descriptor holding up to
 * HAL_NUM_RX_MSDUS_PER_LINK_DESC msdu cookies; all referenced buffers are
 * unmapped and freed, the link descriptor is returned to the idle list and
 * the refill ring is replenished. Returns the number of entries consumed
 * (at most @budget).
 */
int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
{
	struct ath11k *ar;
	struct dp_srng *err_ring;
	struct dp_rxdma_ring *rx_ring;
	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
	struct hal_srng *srng;
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	enum hal_rx_buf_return_buf_manager rbm;
	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
	struct ath11k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	struct hal_reo_entrance_ring *entr_ring;
	void *desc;
	int num_buf_freed = 0;
	int quota = budget;
	dma_addr_t paddr;
	u32 desc_bank;
	void *link_desc_va;
	int num_msdus;
	int i;
	int buf_id;

	/* Map the hw mac id to this pdev's error ring instance. */
	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
	err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
									  mac_id)];
	rx_ring = &ar->dp.rx_refill_buf_ring;

	srng = &ab->hal.srng_list[err_ring->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (quota-- &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);

		entr_ring = (struct hal_reo_entrance_ring *)desc;
		rxdma_err_code =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  entr_ring->info1);
		ab->soc_stats.rxdma_error[rxdma_err_code]++;

		/* Translate the link descriptor's dma address into its
		 * virtual address within the owning descriptor bank.
		 */
		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
						 msdu_cookies, &rbm);

		/* Free every rx buffer referenced by this link descriptor. */
		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				continue;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);
			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);

			num_buf_freed++;
		}

		/* Recycle the link descriptor back to the idle pool. */
		ath11k_dp_rx_link_desc_return(ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (num_buf_freed)
		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
					   HAL_RX_BUF_RBM_SW3_BM);

	return budget - quota;
}
4181
/* Drain the REO status ring: parse each status TLV, match it against a
 * pending command on dp->reo_cmd_list by command number, and invoke the
 * command's completion handler.
 */
void ath11k_dp_process_reo_status(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	struct dp_reo_cmd *cmd, *tmp;
	bool found = false;
	u32 *reo_desc;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);

		/* Decode the status descriptor into reo_status according
		 * to its TLV tag; unknown tags are skipped.
		 */
		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
								  &reo_status);
			break;
		default:
			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

		/* Find and detach the matching pending command; once off
		 * the list, cmd is owned exclusively by this context.
		 */
		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		/* Handler runs outside reo_cmd_lock since the entry was
		 * already unlinked above.
		 */
		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		found = false;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}
4260
/* Tear down per-pdev rx resources: free the rx SRNGs first, then release
 * the rxdma buffers (mirrors the setup order in ath11k_dp_rx_pdev_alloc).
 */
void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;

	ath11k_dp_rx_pdev_srng_free(ar);
	ath11k_dp_rxdma_pdev_buf_free(ar);
}
4268
4269 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
4270 {
4271         struct ath11k *ar = ab->pdevs[mac_id].ar;
4272         struct ath11k_pdev_dp *dp = &ar->dp;
4273         u32 ring_id;
4274         int i;
4275         int ret;
4276
4277         ret = ath11k_dp_rx_pdev_srng_alloc(ar);
4278         if (ret) {
4279                 ath11k_warn(ab, "failed to setup rx srngs\n");
4280                 return ret;
4281         }
4282
4283         ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
4284         if (ret) {
4285                 ath11k_warn(ab, "failed to setup rxdma ring\n");
4286                 return ret;
4287         }
4288
4289         ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4290         ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
4291         if (ret) {
4292                 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4293                             ret);
4294                 return ret;
4295         }
4296
4297         if (ab->hw_params.rx_mac_buf_ring) {
4298                 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4299                         ring_id = dp->rx_mac_buf_ring[i].ring_id;
4300                         ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4301                                                           mac_id + i, HAL_RXDMA_BUF);
4302                         if (ret) {
4303                                 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4304                                             i, ret);
4305                                 return ret;
4306                         }
4307                 }
4308         }
4309
4310         for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4311                 ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4312                 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4313                                                   mac_id + i, HAL_RXDMA_DST);
4314                 if (ret) {
4315                         ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
4316                                     i, ret);
4317                         return ret;
4318                 }
4319         }
4320
4321         if (!ab->hw_params.rxdma1_enable)
4322                 goto config_refill_ring;
4323
4324         ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4325         ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4326                                           mac_id, HAL_RXDMA_MONITOR_BUF);
4327         if (ret) {
4328                 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4329                             ret);
4330                 return ret;
4331         }
4332         ret = ath11k_dp_tx_htt_srng_setup(ab,
4333                                           dp->rxdma_mon_dst_ring.ring_id,
4334                                           mac_id, HAL_RXDMA_MONITOR_DST);
4335         if (ret) {
4336                 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4337                             ret);
4338                 return ret;
4339         }
4340         ret = ath11k_dp_tx_htt_srng_setup(ab,
4341                                           dp->rxdma_mon_desc_ring.ring_id,
4342                                           mac_id, HAL_RXDMA_MONITOR_DESC);
4343         if (ret) {
4344                 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4345                             ret);
4346                 return ret;
4347         }
4348
4349 config_refill_ring:
4350         for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4351                 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
4352                 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
4353                                                   HAL_RXDMA_MONITOR_STATUS);
4354                 if (ret) {
4355                         ath11k_warn(ab,
4356                                     "failed to configure mon_status_refill_ring%d %d\n",
4357                                     i, ret);
4358                         return ret;
4359                 }
4360         }
4361
4362         return 0;
4363 }
4364
4365 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
4366 {
4367         if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
4368                 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
4369                 *total_len -= *frag_len;
4370         } else {
4371                 *frag_len = *total_len;
4372                 *total_len = 0;
4373         }
4374 }
4375
4376 static
4377 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4378                                           void *p_last_buf_addr_info,
4379                                           u8 mac_id)
4380 {
4381         struct ath11k_pdev_dp *dp = &ar->dp;
4382         struct dp_srng *dp_srng;
4383         void *hal_srng;
4384         void *src_srng_desc;
4385         int ret = 0;
4386
4387         if (ar->ab->hw_params.rxdma1_enable) {
4388                 dp_srng = &dp->rxdma_mon_desc_ring;
4389                 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4390         } else {
4391                 dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
4392                 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4393         }
4394
4395         ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4396
4397         src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4398
4399         if (src_srng_desc) {
4400                 struct ath11k_buffer_addr *src_desc =
4401                                 (struct ath11k_buffer_addr *)src_srng_desc;
4402
4403                 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4404         } else {
4405                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4406                            "Monitor Link Desc Ring %d Full", mac_id);
4407                 ret = -ENOMEM;
4408         }
4409
4410         ath11k_hal_srng_access_end(ar->ab, hal_srng);
4411         return ret;
4412 }
4413
4414 static
4415 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4416                                          dma_addr_t *paddr, u32 *sw_cookie,
4417                                          u8 *rbm,
4418                                          void **pp_buf_addr_info)
4419 {
4420         struct hal_rx_msdu_link *msdu_link =
4421                         (struct hal_rx_msdu_link *)rx_msdu_link_desc;
4422         struct ath11k_buffer_addr *buf_addr_info;
4423
4424         buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4425
4426         ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
4427
4428         *pp_buf_addr_info = (void *)buf_addr_info;
4429 }
4430
4431 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
4432 {
4433         if (skb->len > len) {
4434                 skb_trim(skb, len);
4435         } else {
4436                 if (skb_tailroom(skb) < len - skb->len) {
4437                         if ((pskb_expand_head(skb, 0,
4438                                               len - skb->len - skb_tailroom(skb),
4439                                               GFP_ATOMIC))) {
4440                                 dev_kfree_skb_any(skb);
4441                                 return -ENOMEM;
4442                         }
4443                 }
4444                 skb_put(skb, (len - skb->len));
4445         }
4446         return 0;
4447 }
4448
4449 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
4450                                         void *msdu_link_desc,
4451                                         struct hal_rx_msdu_list *msdu_list,
4452                                         u16 *num_msdus)
4453 {
4454         struct hal_rx_msdu_details *msdu_details = NULL;
4455         struct rx_msdu_desc *msdu_desc_info = NULL;
4456         struct hal_rx_msdu_link *msdu_link = NULL;
4457         int i;
4458         u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
4459         u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
4460         u8  tmp  = 0;
4461
4462         msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
4463         msdu_details = &msdu_link->msdu_link[0];
4464
4465         for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
4466                 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
4467                               msdu_details[i].buf_addr_info.info0) == 0) {
4468                         msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
4469                         msdu_desc_info->info0 |= last;
4470                         ;
4471                         break;
4472                 }
4473                 msdu_desc_info = &msdu_details[i].rx_msdu_info;
4474
4475                 if (!i)
4476                         msdu_desc_info->info0 |= first;
4477                 else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
4478                         msdu_desc_info->info0 |= last;
4479                 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
4480                 msdu_list->msdu_info[i].msdu_len =
4481                          HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
4482                 msdu_list->sw_cookie[i] =
4483                         FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
4484                                   msdu_details[i].buf_addr_info.info1);
4485                 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
4486                                 msdu_details[i].buf_addr_info.info1);
4487                 msdu_list->rbm[i] = tmp;
4488         }
4489         *num_msdus = i;
4490 }
4491
4492 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
4493                                         u32 *rx_bufs_used)
4494 {
4495         u32 ret = 0;
4496
4497         if ((*ppdu_id < msdu_ppdu_id) &&
4498             ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
4499                 *ppdu_id = msdu_ppdu_id;
4500                 ret = msdu_ppdu_id;
4501         } else if ((*ppdu_id > msdu_ppdu_id) &&
4502                 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
4503                 /* mon_dst is behind than mon_status
4504                  * skip dst_ring and free it
4505                  */
4506                 *rx_bufs_used += 1;
4507                 *ppdu_id = msdu_ppdu_id;
4508                 ret = msdu_ppdu_id;
4509         }
4510         return ret;
4511 }
4512
4513 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
4514                                       bool *is_frag, u32 *total_len,
4515                                       u32 *frag_len, u32 *msdu_cnt)
4516 {
4517         if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
4518                 if (!*is_frag) {
4519                         *total_len = info->msdu_len;
4520                         *is_frag = true;
4521                 }
4522                 ath11k_dp_mon_set_frag_len(total_len,
4523                                            frag_len);
4524         } else {
4525                 if (*is_frag) {
4526                         ath11k_dp_mon_set_frag_len(total_len,
4527                                                    frag_len);
4528                 } else {
4529                         *frag_len = info->msdu_len;
4530                 }
4531                 *is_frag = false;
4532                 *msdu_cnt -= 1;
4533         }
4534 }
4535
4536 static u32
4537 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
4538                           void *ring_entry, struct sk_buff **head_msdu,
4539                           struct sk_buff **tail_msdu, u32 *npackets,
4540                           u32 *ppdu_id)
4541 {
4542         struct ath11k_pdev_dp *dp = &ar->dp;
4543         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4544         struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
4545         struct sk_buff *msdu = NULL, *last = NULL;
4546         struct hal_rx_msdu_list msdu_list;
4547         void *p_buf_addr_info, *p_last_buf_addr_info;
4548         struct hal_rx_desc *rx_desc;
4549         void *rx_msdu_link_desc;
4550         dma_addr_t paddr;
4551         u16 num_msdus = 0;
4552         u32 rx_buf_size, rx_pkt_offset, sw_cookie;
4553         u32 rx_bufs_used = 0, i = 0;
4554         u32 msdu_ppdu_id = 0, msdu_cnt = 0;
4555         u32 total_len = 0, frag_len = 0;
4556         bool is_frag, is_first_msdu;
4557         bool drop_mpdu = false;
4558         struct ath11k_skb_rxcb *rxcb;
4559         struct hal_reo_entrance_ring *ent_desc =
4560                         (struct hal_reo_entrance_ring *)ring_entry;
4561         int buf_id;
4562         u32 rx_link_buf_info[2];
4563         u8 rbm;
4564
4565         if (!ar->ab->hw_params.rxdma1_enable)
4566                 rx_ring = &dp->rx_refill_buf_ring;
4567
4568         ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
4569                                             &sw_cookie,
4570                                             &p_last_buf_addr_info, &rbm,
4571                                             &msdu_cnt);
4572
4573         if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
4574                       ent_desc->info1) ==
4575                       HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4576                 u8 rxdma_err =
4577                         FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4578                                   ent_desc->info1);
4579                 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
4580                     rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
4581                     rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
4582                         drop_mpdu = true;
4583                         pmon->rx_mon_stats.dest_mpdu_drop++;
4584                 }
4585         }
4586
4587         is_frag = false;
4588         is_first_msdu = true;
4589
4590         do {
4591                 if (pmon->mon_last_linkdesc_paddr == paddr) {
4592                         pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
4593                         return rx_bufs_used;
4594                 }
4595
4596                 if (ar->ab->hw_params.rxdma1_enable)
4597                         rx_msdu_link_desc =
4598                                 (void *)pmon->link_desc_banks[sw_cookie].vaddr +
4599                                 (paddr - pmon->link_desc_banks[sw_cookie].paddr);
4600                 else
4601                         rx_msdu_link_desc =
4602                                 (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
4603                                 (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
4604
4605                 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
4606                                             &num_msdus);
4607
4608                 for (i = 0; i < num_msdus; i++) {
4609                         u32 l2_hdr_offset;
4610
4611                         if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
4612                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4613                                            "i %d last_cookie %d is same\n",
4614                                            i, pmon->mon_last_buf_cookie);
4615                                 drop_mpdu = true;
4616                                 pmon->rx_mon_stats.dup_mon_buf_cnt++;
4617                                 continue;
4618                         }
4619                         buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4620                                            msdu_list.sw_cookie[i]);
4621
4622                         spin_lock_bh(&rx_ring->idr_lock);
4623                         msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4624                         spin_unlock_bh(&rx_ring->idr_lock);
4625                         if (!msdu) {
4626                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4627                                            "msdu_pop: invalid buf_id %d\n", buf_id);
4628                                 break;
4629                         }
4630                         rxcb = ATH11K_SKB_RXCB(msdu);
4631                         if (!rxcb->unmapped) {
4632                                 dma_unmap_single(ar->ab->dev, rxcb->paddr,
4633                                                  msdu->len +
4634                                                  skb_tailroom(msdu),
4635                                                  DMA_FROM_DEVICE);
4636                                 rxcb->unmapped = 1;
4637                         }
4638                         if (drop_mpdu) {
4639                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4640                                            "i %d drop msdu %p *ppdu_id %x\n",
4641                                            i, msdu, *ppdu_id);
4642                                 dev_kfree_skb_any(msdu);
4643                                 msdu = NULL;
4644                                 goto next_msdu;
4645                         }
4646
4647                         rx_desc = (struct hal_rx_desc *)msdu->data;
4648
4649                         rx_pkt_offset = sizeof(struct hal_rx_desc);
4650                         l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
4651
4652                         if (is_first_msdu) {
4653                                 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
4654                                         drop_mpdu = true;
4655                                         dev_kfree_skb_any(msdu);
4656                                         msdu = NULL;
4657                                         pmon->mon_last_linkdesc_paddr = paddr;
4658                                         goto next_msdu;
4659                                 }
4660
4661                                 msdu_ppdu_id =
4662                                         ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
4663
4664                                 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
4665                                                                  ppdu_id,
4666                                                                  &rx_bufs_used)) {
4667                                         if (rx_bufs_used) {
4668                                                 drop_mpdu = true;
4669                                                 dev_kfree_skb_any(msdu);
4670                                                 msdu = NULL;
4671                                                 goto next_msdu;
4672                                         }
4673                                         return rx_bufs_used;
4674                                 }
4675                                 pmon->mon_last_linkdesc_paddr = paddr;
4676                                 is_first_msdu = false;
4677                         }
4678                         ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
4679                                                   &is_frag, &total_len,
4680                                                   &frag_len, &msdu_cnt);
4681                         rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
4682
4683                         ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
4684
4685                         if (!(*head_msdu))
4686                                 *head_msdu = msdu;
4687                         else if (last)
4688                                 last->next = msdu;
4689
4690                         last = msdu;
4691 next_msdu:
4692                         pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
4693                         rx_bufs_used++;
4694                         spin_lock_bh(&rx_ring->idr_lock);
4695                         idr_remove(&rx_ring->bufs_idr, buf_id);
4696                         spin_unlock_bh(&rx_ring->idr_lock);
4697                 }
4698
4699                 ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
4700
4701                 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
4702                                                     &sw_cookie, &rbm,
4703                                                     &p_buf_addr_info);
4704
4705                 if (ar->ab->hw_params.rxdma1_enable) {
4706                         if (ath11k_dp_rx_monitor_link_desc_return(ar,
4707                                                                   p_last_buf_addr_info,
4708                                                                   dp->mac_id))
4709                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4710                                            "dp_rx_monitor_link_desc_return failed");
4711                 } else {
4712                         ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
4713                                                       HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4714                 }
4715
4716                 p_last_buf_addr_info = p_buf_addr_info;
4717
4718         } while (paddr && msdu_cnt);
4719
4720         if (last)
4721                 last->next = NULL;
4722
4723         *tail_msdu = msdu;
4724
4725         if (msdu_cnt == 0)
4726                 *npackets = 1;
4727
4728         return rx_bufs_used;
4729 }
4730
4731 static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
4732 {
4733         u32 rx_pkt_offset, l2_hdr_offset;
4734
4735         rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
4736         l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
4737                                                       (struct hal_rx_desc *)msdu->data);
4738         skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4739 }
4740
/* Strip HAL RX descriptors from the MSDU chain captured on the monitor
 * destination ring and prepare it for delivery to mac80211.  @rxs is
 * filled from the first MSDU's descriptor.  Returns @head_msdu (now a
 * chain of payload-only buffers) on success or NULL on failure; on
 * failure the caller frees the original chain.
 *
 * NOTE(review): @mac_id and @last_msdu are not used in this function.
 */
static struct sk_buff *
ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
			    u32 mac_id, struct sk_buff *head_msdu,
			    struct sk_buff *last_msdu,
			    struct ieee80211_rx_status *rxs)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *msdu, *mpdu_buf, *prev_buf;
	u32 wifi_hdr_len;
	struct hal_rx_desc *rx_desc;
	char *hdr_desc;
	u8 *dest, decap_format;
	struct ieee80211_hdr_3addr *wh;
	struct rx_attention *rx_attention;

	mpdu_buf = NULL;

	if (!head_msdu)
		goto err_merge_fail;

	rx_desc = (struct hal_rx_desc *)head_msdu->data;
	rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);

	/* Drop MPDUs the hardware flagged with a length error. */
	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
		return NULL;

	decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);

	/* Populate @rxs (band/freq/rate info etc.) from the first MSDU. */
	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);

	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
		/* Raw decap: buffers already carry 802.11 bytes, so only the
		 * RX descriptor is stripped from every MSDU; the FCS is
		 * trimmed off the last buffer.
		 */
		ath11k_dp_rx_msdus_set_payload(ar, head_msdu);

		prev_buf = head_msdu;
		msdu = head_msdu->next;

		while (msdu) {
			ath11k_dp_rx_msdus_set_payload(ar, msdu);

			prev_buf = msdu;
			msdu = msdu->next;
		}

		prev_buf->next = NULL;

		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
		__le16 qos_field;
		u8 qos_pkt = 0;

		rx_desc = (struct hal_rx_desc *)head_msdu->data;
		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);

		/* Base size */
		wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
		wh = (struct ieee80211_hdr_3addr *)hdr_desc;

		/* Remember the QoS control field from the MPDU header so it
		 * can be re-attached to each MSDU's header copy below.
		 */
		if (ieee80211_is_data_qos(wh->frame_control)) {
			struct ieee80211_qos_hdr *qwh =
					(struct ieee80211_qos_hdr *)hdr_desc;

			qos_field = qwh->qos_ctrl;
			qos_pkt = 1;
		}
		msdu = head_msdu;

		while (msdu) {
			rx_desc = (struct hal_rx_desc *)msdu->data;
			hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);

			if (qos_pkt) {
				/* NOTE(review): only sizeof(__le16) bytes are
				 * pushed, yet wifi_hdr_len + 2 bytes are
				 * written at @dest; the surplus overwrites
				 * descriptor bytes that set_payload() pulls
				 * off right after — confirm this is intended.
				 */
				dest = skb_push(msdu, sizeof(__le16));
				if (!dest)
					goto err_merge_fail;
				memcpy(dest, hdr_desc, wifi_hdr_len);
				memcpy(dest + wifi_hdr_len,
				       (u8 *)&qos_field, sizeof(__le16));
			}
			ath11k_dp_rx_msdus_set_payload(ar, msdu);
			prev_buf = msdu;
			msdu = msdu->next;
		}
		/* Reserve room for the FCS at the tail of the last buffer. */
		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
		if (!dest)
			goto err_merge_fail;

		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "mpdu_buf %pK mpdu_buf->len %u",
			   prev_buf, prev_buf->len);
	} else {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "decap format %d is not supported!\n",
			   decap_format);
		goto err_merge_fail;
	}

	return head_msdu;

err_merge_fail:
	/* NOTE(review): mpdu_buf is never assigned non-NULL anywhere in
	 * this function, so this free path looks unreachable — verify.
	 */
	if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "err_merge_fail mpdu_buf %pK", mpdu_buf);
		/* Free the head buffer */
		dev_kfree_skb_any(mpdu_buf);
	}
	return NULL;
}
4848
4849 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
4850                                     struct sk_buff *head_msdu,
4851                                     struct sk_buff *tail_msdu,
4852                                     struct napi_struct *napi)
4853 {
4854         struct ath11k_pdev_dp *dp = &ar->dp;
4855         struct sk_buff *mon_skb, *skb_next, *header;
4856         struct ieee80211_rx_status *rxs = &dp->rx_status, *status;
4857
4858         mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
4859                                               tail_msdu, rxs);
4860
4861         if (!mon_skb)
4862                 goto mon_deliver_fail;
4863
4864         header = mon_skb;
4865
4866         rxs->flag = 0;
4867         do {
4868                 skb_next = mon_skb->next;
4869                 if (!skb_next)
4870                         rxs->flag &= ~RX_FLAG_AMSDU_MORE;
4871                 else
4872                         rxs->flag |= RX_FLAG_AMSDU_MORE;
4873
4874                 if (mon_skb == header) {
4875                         header = NULL;
4876                         rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
4877                 } else {
4878                         rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
4879                 }
4880                 rxs->flag |= RX_FLAG_ONLY_MONITOR;
4881
4882                 status = IEEE80211_SKB_RXCB(mon_skb);
4883                 *status = *rxs;
4884
4885                 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
4886                 mon_skb = skb_next;
4887         } while (mon_skb);
4888         rxs->flag = 0;
4889
4890         return 0;
4891
4892 mon_deliver_fail:
4893         mon_skb = head_msdu;
4894         while (mon_skb) {
4895                 skb_next = mon_skb->next;
4896                 dev_kfree_skb_any(mon_skb);
4897                 mon_skb = skb_next;
4898         }
4899         return -EINVAL;
4900 }
4901
4902 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
4903                                           u32 quota, struct napi_struct *napi)
4904 {
4905         struct ath11k_pdev_dp *dp = &ar->dp;
4906         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4907         void *ring_entry;
4908         void *mon_dst_srng;
4909         u32 ppdu_id;
4910         u32 rx_bufs_used;
4911         u32 ring_id;
4912         struct ath11k_pdev_mon_stats *rx_mon_stats;
4913         u32      npackets = 0;
4914
4915         if (ar->ab->hw_params.rxdma1_enable)
4916                 ring_id = dp->rxdma_mon_dst_ring.ring_id;
4917         else
4918                 ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
4919
4920         mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
4921
4922         if (!mon_dst_srng) {
4923                 ath11k_warn(ar->ab,
4924                             "HAL Monitor Destination Ring Init Failed -- %pK",
4925                             mon_dst_srng);
4926                 return;
4927         }
4928
4929         spin_lock_bh(&pmon->mon_lock);
4930
4931         ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
4932
4933         ppdu_id = pmon->mon_ppdu_info.ppdu_id;
4934         rx_bufs_used = 0;
4935         rx_mon_stats = &pmon->rx_mon_stats;
4936
4937         while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
4938                 struct sk_buff *head_msdu, *tail_msdu;
4939
4940                 head_msdu = NULL;
4941                 tail_msdu = NULL;
4942
4943                 rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
4944                                                           &head_msdu,
4945                                                           &tail_msdu,
4946                                                           &npackets, &ppdu_id);
4947
4948                 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
4949                         pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4950                         ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4951                                    "dest_rx: new ppdu_id %x != status ppdu_id %x",
4952                                    ppdu_id, pmon->mon_ppdu_info.ppdu_id);
4953                         break;
4954                 }
4955                 if (head_msdu && tail_msdu) {
4956                         ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
4957                                                  tail_msdu, napi);
4958                         rx_mon_stats->dest_mpdu_done++;
4959                 }
4960
4961                 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
4962                                                                 mon_dst_srng);
4963         }
4964         ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
4965
4966         spin_unlock_bh(&pmon->mon_lock);
4967
4968         if (rx_bufs_used) {
4969                 rx_mon_stats->dest_ppdu_done++;
4970                 if (ar->ab->hw_params.rxdma1_enable)
4971                         ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
4972                                                    &dp->rxdma_mon_buf_ring,
4973                                                    rx_bufs_used,
4974                                                    HAL_RX_BUF_RBM_SW3_BM);
4975                 else
4976                         ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
4977                                                    &dp->rx_refill_buf_ring,
4978                                                    rx_bufs_used,
4979                                                    HAL_RX_BUF_RBM_SW3_BM);
4980         }
4981 }
4982
4983 static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
4984                                                 int mac_id, u32 quota,
4985                                                 struct napi_struct *napi)
4986 {
4987         struct ath11k_pdev_dp *dp = &ar->dp;
4988         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4989         struct hal_rx_mon_ppdu_info *ppdu_info;
4990         struct sk_buff *status_skb;
4991         u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
4992         struct ath11k_pdev_mon_stats *rx_mon_stats;
4993
4994         ppdu_info = &pmon->mon_ppdu_info;
4995         rx_mon_stats = &pmon->rx_mon_stats;
4996
4997         if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
4998                 return;
4999
5000         while (!skb_queue_empty(&pmon->rx_status_q)) {
5001                 status_skb = skb_dequeue(&pmon->rx_status_q);
5002
5003                 tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
5004                                                             status_skb);
5005                 if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
5006                         rx_mon_stats->status_ppdu_done++;
5007                         pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
5008                         ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi);
5009                         pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5010                 }
5011                 dev_kfree_skb_any(status_skb);
5012         }
5013 }
5014
5015 static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
5016                                     struct napi_struct *napi, int budget)
5017 {
5018         struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5019         struct ath11k_pdev_dp *dp = &ar->dp;
5020         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5021         int num_buffs_reaped = 0;
5022
5023         num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget,
5024                                                              &pmon->rx_status_q);
5025         if (num_buffs_reaped)
5026                 ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi);
5027
5028         return num_buffs_reaped;
5029 }
5030
5031 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
5032                                    struct napi_struct *napi, int budget)
5033 {
5034         struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5035         int ret = 0;
5036
5037         if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
5038                 ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
5039         else
5040                 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
5041         return ret;
5042 }
5043
5044 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
5045 {
5046         struct ath11k_pdev_dp *dp = &ar->dp;
5047         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5048
5049         skb_queue_head_init(&pmon->rx_status_q);
5050
5051         pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5052
5053         memset(&pmon->rx_mon_stats, 0,
5054                sizeof(pmon->rx_mon_stats));
5055         return 0;
5056 }
5057
5058 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
5059 {
5060         struct ath11k_pdev_dp *dp = &ar->dp;
5061         struct ath11k_mon_data *pmon = &dp->mon_data;
5062         struct hal_srng *mon_desc_srng = NULL;
5063         struct dp_srng *dp_srng;
5064         int ret = 0;
5065         u32 n_link_desc = 0;
5066
5067         ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
5068         if (ret) {
5069                 ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
5070                 return ret;
5071         }
5072
5073         /* if rxdma1_enable is false, no need to setup
5074          * rxdma_mon_desc_ring.
5075          */
5076         if (!ar->ab->hw_params.rxdma1_enable)
5077                 return 0;
5078
5079         dp_srng = &dp->rxdma_mon_desc_ring;
5080         n_link_desc = dp_srng->size /
5081                 ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
5082         mon_desc_srng =
5083                 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
5084
5085         ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
5086                                         HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
5087                                         n_link_desc);
5088         if (ret) {
5089                 ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
5090                 return ret;
5091         }
5092         pmon->mon_last_linkdesc_paddr = 0;
5093         pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
5094         spin_lock_init(&pmon->mon_lock);
5095
5096         return 0;
5097 }
5098
5099 static int ath11k_dp_mon_link_free(struct ath11k *ar)
5100 {
5101         struct ath11k_pdev_dp *dp = &ar->dp;
5102         struct ath11k_mon_data *pmon = &dp->mon_data;
5103
5104         ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
5105                                     HAL_RXDMA_MONITOR_DESC,
5106                                     &dp->rxdma_mon_desc_ring);
5107         return 0;
5108 }
5109
/* Detach per-pdev monitor RX state; releases the monitor link descriptor
 * pool.  Always returns 0.
 */
int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);

	return 0;
}
5115
5116 int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
5117 {
5118         /* start reap timer */
5119         mod_timer(&ab->mon_reap_timer,
5120                   jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
5121
5122         return 0;
5123 }
5124
5125 int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
5126 {
5127         int ret;
5128
5129         if (stop_timer)
5130                 del_timer_sync(&ab->mon_reap_timer);
5131
5132         /* reap all the monitor related rings */
5133         ret = ath11k_dp_purge_mon_ring(ab);
5134         if (ret) {
5135                 ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
5136                 return ret;
5137         }
5138
5139         return 0;
5140 }