/* drivers/net/wireless/intel/iwlwifi/mvm/tx.c */
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018        Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018        Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "sta.h"

static void
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
                          u16 tid, u16 ssn)
{
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_ba *ba_trig;

        trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
        if (!trig)
                return;

        ba_trig = (void *)trig->data;

        if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
                return;

        iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
                                "BAR sent to %pM, tid %d, ssn %d",
                                addr, tid, ssn);
}

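/*
 * Return a pointer of the given type to the IPv6 extension header at
 * offset @off from the start of the network header.
 */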
#define OPT_HDR(type, skb, off) \
        (type *)(skb_network_header(skb) + (off))

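/*
 * Compute the checksum-offload assist bits for the Tx command. Anything
 * the hardware cannot offload is checksummed in software here, via
 * skb_checksum_help().
 */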
static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
                           struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info,
                           u16 offload_assist)
{
#if IS_ENABLED(CONFIG_INET)
        u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
        u8 protocol = 0;

        /*
         * Do not compute the checksum if it was already computed or if the
         * transport will compute it
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
                goto out;

        /* We do not expect to be requested to csum stuff we do not support */
        if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
                      (skb->protocol != htons(ETH_P_IP) &&
                       skb->protocol != htons(ETH_P_IPV6)),
                      "No support for requested checksum\n")) {
                skb_checksum_help(skb);
                goto out;
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                protocol = ip_hdr(skb)->protocol;
        } else {
#if IS_ENABLED(CONFIG_IPV6)
                struct ipv6hdr *ipv6h =
                        (struct ipv6hdr *)skb_network_header(skb);
                unsigned int off = sizeof(*ipv6h);

                protocol = ipv6h->nexthdr;
                while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
                        struct ipv6_opt_hdr *hp;

                        /* only supported extension headers */
                        if (protocol != NEXTHDR_ROUTING &&
                            protocol != NEXTHDR_HOP &&
                            protocol != NEXTHDR_DEST) {
                                skb_checksum_help(skb);
                                goto out;
                        }

                        hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
                        protocol = hp->nexthdr;
                        off += ipv6_optlen(hp);
                }
                /* if we get here, the protocol should be TCP or UDP */
#endif
        }

        if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
                WARN_ON_ONCE(1);
                skb_checksum_help(skb);
                goto out;
        }

        /* enable L4 csum */
        offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

        /*
         * Set offset to IP header (snap).
         * We don't support tunneling so no need to take care of inner header.
         * Size is in words.
         */
        offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);

        /* Do IPv4 csum for A-MSDU only (no IP csum for IPv6) */
        if (skb->protocol == htons(ETH_P_IP) &&
            (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
                ip_hdr(skb)->check = 0;
                offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
        }

        /* reset UDP/TCP header csum */
        if (protocol == IPPROTO_TCP)
                tcp_hdr(skb)->check = 0;
        else
                udp_hdr(skb)->check = 0;

        /*
         * The MAC header length should include the IV (size is in words),
         * unless the IV is added by the firmware, as it is for WEP.
         * In the new Tx API, the IV is always added by the firmware.
         */
        if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
            info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
            info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
                mh_len += info->control.hw_key->iv_len;
        mh_len /= 2;
        offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

out:
#endif
        return offload_assist;
}

/*
 * Sets most of the Tx cmd's fields
 */
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        struct iwl_tx_cmd *tx_cmd,
                        struct ieee80211_tx_info *info, u8 sta_id)
{
        struct ieee80211_hdr *hdr = (void *)skb->data;
        __le16 fc = hdr->frame_control;
        u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
        u32 len = skb->len + FCS_LEN;
        u16 offload_assist = 0;
        u8 ac;

        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
                tx_flags |= TX_CMD_FLG_ACK;
        else
                tx_flags &= ~TX_CMD_FLG_ACK;

        if (ieee80211_is_probe_resp(fc))
                tx_flags |= TX_CMD_FLG_TSF;

        if (ieee80211_has_morefrags(fc))
                tx_flags |= TX_CMD_FLG_MORE_FRAG;

        if (ieee80211_is_data_qos(fc)) {
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
                if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
                        offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
        } else if (ieee80211_is_back_req(fc)) {
                struct ieee80211_bar *bar = (void *)skb->data;
                u16 control = le16_to_cpu(bar->control);
                u16 ssn = le16_to_cpu(bar->start_seq_num);

                tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
                tx_cmd->tid_tspec = (control &
                                     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
                        IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
                WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
                iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
                                          ssn);
        } else {
                if (ieee80211_is_data(fc))
                        tx_cmd->tid_tspec = IWL_TID_NON_QOS;
                else
                        tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;

                if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
                        tx_flags |= TX_CMD_FLG_SEQ_CTL;
                else
                        tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
        }

        /* Default to 0 (BE) when tid_tspec is set to IWL_MAX_TID_COUNT */
        if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
                ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
        else
                ac = tid_to_mac80211_ac[0];

        tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
                        TX_CMD_FLG_BT_PRIO_POS;

        if (ieee80211_is_mgmt(fc)) {
                if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
                        tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
                else if (ieee80211_is_action(fc))
                        tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
                else
                        tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);

                /* The spec allows Action frames in A-MPDU, but we don't
                 * support it
                 */
                WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
        } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
                tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
        } else {
                tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
        }

        if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
            !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
                tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
            ieee80211_action_contains_tpc(skb))
                tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

        tx_cmd->tx_flags = cpu_to_le32(tx_flags);
        /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
        tx_cmd->len = cpu_to_le16((u16)skb->len);
        tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
        tx_cmd->sta_id = sta_id;

        /* padding is inserted later in transport */
        if (ieee80211_hdrlen(fc) % 4 &&
            !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
                offload_assist |= BIT(TX_CMD_OFFLD_PAD);

        tx_cmd->offload_assist |=
                cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info,
                                            offload_assist));
}

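/*
 * Pick the legacy Tx rate (PLCP value plus antenna/CCK flags) for the Tx
 * command, based on mac80211's rate-control info for this frame.
 */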
static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
                               struct ieee80211_tx_info *info,
                               struct ieee80211_sta *sta)
{
        int rate_idx;
        u8 rate_plcp;
        u32 rate_flags;

        /* An HT rate doesn't make sense for a non-data frame */
        WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
                  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
                  info->control.rates[0].flags,
                  info->control.rates[0].idx);

        rate_idx = info->control.rates[0].idx;
        /* if the rate isn't a well-known legacy rate, take the lowest one */
        if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
                rate_idx = rate_lowest_index(
                                &mvm->nvm_data->bands[info->band], sta);

        /* For the 5 GHz band, remap mac80211 rate indices into driver indices */
        if (info->band == NL80211_BAND_5GHZ)
                rate_idx += IWL_FIRST_OFDM_RATE;

        /* For the 2.4 GHz band, check that there is no need to remap */
        BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

        /* Get PLCP rate for tx_cmd->rate_n_flags */
        rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

        if (info->band == NL80211_BAND_2GHZ &&
            !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
                rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
        else
                rate_flags =
                        BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;

        /* Set CCK flag as needed */
        if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
                rate_flags |= RATE_MCS_CCK_MSK;

        return (u32)rate_plcp | rate_flags;
}

/*
 * Sets the fields in the Tx cmd that are rate related
 */
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
                            struct ieee80211_tx_info *info,
                            struct ieee80211_sta *sta, __le16 fc)
{
        /* Set retry limit on RTS packets */
        tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;

        /* Set retry limit on DATA packets and Probe Responses */
        if (ieee80211_is_probe_resp(fc)) {
                tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
                tx_cmd->rts_retry_limit =
                        min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
        } else if (ieee80211_is_back_req(fc)) {
                tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
        } else {
                tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
        }

        /*
         * For data packets, rate info comes from the table inside the fw.
         * This table is controlled by LINK_QUALITY commands.
         */

        if (ieee80211_is_data(fc) && sta) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
                return;
        } else if (ieee80211_is_back_req(fc)) {
                tx_cmd->tx_flags |=
                        cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
        }

        mvm->mgmt_last_antenna_idx =
                iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
                                     mvm->mgmt_last_antenna_idx);

        /* Set the rate in the TX cmd */
        tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
}

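/*
 * Write the PN into the CCMP/GCMP header of the frame. The assignments
 * below follow the IEEE 802.11 CCMP header layout: PN0, PN1, a reserved
 * byte, the key ID octet (with the Ext IV bit, 0x20, set), then PN2-PN5.
 */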
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
                                         u8 *crypto_hdr)
{
        struct ieee80211_key_conf *keyconf = info->control.hw_key;
        u64 pn;

        pn = atomic64_inc_return(&keyconf->tx_pn);
        crypto_hdr[0] = pn;
        crypto_hdr[2] = 0;
        crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
        crypto_hdr[1] = pn >> 8;
        crypto_hdr[4] = pn >> 16;
        crypto_hdr[5] = pn >> 24;
        crypto_hdr[6] = pn >> 32;
        crypto_hdr[7] = pn >> 40;
}

/*
 * Sets the fields in the Tx cmd that are crypto related
 */
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
                                      struct ieee80211_tx_info *info,
                                      struct iwl_tx_cmd *tx_cmd,
                                      struct sk_buff *skb_frag,
                                      int hdrlen)
{
        struct ieee80211_key_conf *keyconf = info->control.hw_key;
        u8 *crypto_hdr = skb_frag->data + hdrlen;
        enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
        u64 pn;

        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
                iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
                iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
                break;

        case WLAN_CIPHER_SUITE_TKIP:
                tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
                pn = atomic64_inc_return(&keyconf->tx_pn);
                ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
                ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
                break;

        case WLAN_CIPHER_SUITE_WEP104:
                tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
                /* fall through */
        case WLAN_CIPHER_SUITE_WEP40:
                tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
                        ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
                          TX_CMD_SEC_WEP_KEY_IDX_MSK);

                memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
                break;
        case WLAN_CIPHER_SUITE_GCMP:
        case WLAN_CIPHER_SUITE_GCMP_256:
                type = TX_CMD_SEC_GCMP;
                /* Fall through */
        case WLAN_CIPHER_SUITE_CCMP_256:
                /* TODO: Taking the key from the table might introduce a race
                 * when PTK rekeying is done: we could end up with an old
                 * packet whose PN is based on the old key while the message
                 * is encrypted with the new one.
                 * Need to handle this.
                 */
                tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
                tx_cmd->key[0] = keyconf->hw_key_idx;
                iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
                break;
        default:
                tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
        }
}

/*
 * Allocates a device command and fills in the Tx cmd for the given frame
 */
static struct iwl_device_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
                      struct ieee80211_tx_info *info, int hdrlen,
                      struct ieee80211_sta *sta, u8 sta_id)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;

        dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);

        if (unlikely(!dev_cmd))
                return NULL;

        /* Make sure we zero enough of dev_cmd */
        BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
        BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));

        memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
        dev_cmd->hdr.cmd = TX_CMD;

        if (iwl_mvm_has_new_tx_api(mvm)) {
                u16 offload_assist = 0;
                u32 rate_n_flags = 0;
                u16 flags = 0;

                if (ieee80211_is_data_qos(hdr->frame_control)) {
                        u8 *qc = ieee80211_get_qos_ctl(hdr);

                        if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
                                offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
                }

                offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info,
                                                 offload_assist);

                /* padding is inserted later in transport */
                if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
                    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
                        offload_assist |= BIT(TX_CMD_OFFLD_PAD);

                if (!info->control.hw_key)
                        flags |= IWL_TX_FLAGS_ENCRYPT_DIS;

                /* For data packets rate info comes from the fw */
                if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
                        flags |= IWL_TX_FLAGS_CMD_RATE;
                        rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
                }

                if (mvm->trans->cfg->device_family >=
                    IWL_DEVICE_FAMILY_22560) {
                        struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;

                        cmd->offload_assist |= cpu_to_le32(offload_assist);

                        /* Total # bytes to be transmitted */
                        cmd->len = cpu_to_le16((u16)skb->len);

                        /* Copy MAC header from skb into command buffer */
                        memcpy(cmd->hdr, hdr, hdrlen);

                        cmd->flags = cpu_to_le16(flags);
                        cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
                } else {
                        struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;

                        cmd->offload_assist |= cpu_to_le16(offload_assist);

                        /* Total # bytes to be transmitted */
                        cmd->len = cpu_to_le16((u16)skb->len);

                        /* Copy MAC header from skb into command buffer */
                        memcpy(cmd->hdr, hdr, hdrlen);

                        cmd->flags = cpu_to_le32(flags);
                        cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
                }
                goto out;
        }

        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

        if (info->control.hw_key)
                iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);

        iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);

        iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);

        /* Copy MAC header from skb into command buffer */
        memcpy(tx_cmd->hdr, hdr, hdrlen);

out:
        return dev_cmd;
}

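/*
 * Stash the device command pointer in the skb's control buffer, for the
 * transport to pick up later. From this point on, info->control must not
 * be accessed.
 */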
static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
                                       struct iwl_device_cmd *cmd)
{
        struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);

        memset(&skb_info->status, 0, sizeof(skb_info->status));
        memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));

        skb_info->driver_data[1] = cmd;
}

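/*
 * Choose the Tx queue for a frame sent on a control (AP/IBSS/P2P-device)
 * interface: probe queue, multicast (cab) queue or P2P device queue.
 */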
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
                                      struct ieee80211_tx_info *info, __le16 fc)
{
        struct iwl_mvm_vif *mvmvif;

        mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);

        switch (info->control.vif->type) {
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_ADHOC:
                /*
                 * Non-bufferable frames use the broadcast station, thus they
                 * use the probe queue.
                 * Also take care of the case where we send a deauth to a
                 * station that we don't have, or similarly an association
                 * response (with non-success status) for a station we can't
                 * accept.
                 * Also, disassociate frames might happen, particularly with
                 * reason 7 ("Class 3 frame received from nonassociated STA").
                 */
                if (ieee80211_is_mgmt(fc) &&
                    (!ieee80211_is_bufferable_mmpdu(fc) ||
                     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
                        return mvm->probe_queue;
                if (info->hw_queue == info->control.vif->cab_queue)
                        return mvmvif->cab_queue;

                WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
                          "fc=0x%02x", le16_to_cpu(fc));
                return mvm->probe_queue;
        case NL80211_IFTYPE_P2P_DEVICE:
                if (ieee80211_is_mgmt(fc))
                        return mvm->p2p_dev_queue;
                if (info->hw_queue == info->control.vif->cab_queue)
                        return mvmvif->cab_queue;

                WARN_ON_ONCE(1);
                return mvm->p2p_dev_queue;
        default:
                WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
                return -1;
        }
}

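/*
 * Append the P2P Notice of Absence attribute from the firmware's
 * probe-response-data notification to an outgoing probe response, as a
 * WFA vendor-specific IE.
 */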
static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
                                       struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_mvm_vif *mvmvif =
                iwl_mvm_vif_from_mac80211(info->control.vif);
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
        int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
        struct iwl_probe_resp_data *resp_data;
        u8 *ie, *pos;
        u8 match[] = {
                (WLAN_OUI_WFA >> 16) & 0xff,
                (WLAN_OUI_WFA >> 8) & 0xff,
                WLAN_OUI_WFA & 0xff,
                WLAN_OUI_TYPE_WFA_P2P,
        };

        rcu_read_lock();

        resp_data = rcu_dereference(mvmvif->probe_resp_data);
        if (!resp_data)
                goto out;

        if (!resp_data->notif.noa_active)
                goto out;

        ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
                                          mgmt->u.probe_resp.variable,
                                          skb->len - base_len,
                                          match, 4, 2);
        if (!ie) {
                IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
                goto out;
        }

        if (skb_tailroom(skb) < resp_data->noa_len) {
                if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
                        IWL_ERR(mvm,
                                "Failed to reallocate probe resp\n");
                        goto out;
                }
        }

        pos = skb_put(skb, resp_data->noa_len);

        *pos++ = WLAN_EID_VENDOR_SPECIFIC;
        /* Set length of IE body (not including ID and length itself) */
        *pos++ = resp_data->noa_len - 2;
        *pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
        *pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
        *pos++ = WLAN_OUI_WFA & 0xff;
        *pos++ = WLAN_OUI_TYPE_WFA_P2P;

        memcpy(pos, &resp_data->notif.noa_attr,
               resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));

out:
        rcu_read_unlock();
}

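/*
 * Tx a frame that has no station attached to it: pick the queue and the
 * aux/broadcast/multicast station, then build the Tx cmd and hand the
 * frame to the transport.
 */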
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_info info;
        struct iwl_device_cmd *dev_cmd;
        u8 sta_id;
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
        __le16 fc = hdr->frame_control;
        int queue;

        /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be sent
         * on two different types of vifs, P2P & STATION. P2P uses the
         * offchannel queue. STATION (HS2.0) uses the auxiliary context of
         * the FW, and hence needs to be sent on the aux queue.
         */
        if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
            skb_info->control.vif->type == NL80211_IFTYPE_STATION)
                skb_info->hw_queue = mvm->aux_queue;

        memcpy(&info, skb->cb, sizeof(info));

        if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
                return -1;

        if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
                         (!info.control.vif ||
                          info.hw_queue != info.control.vif->cab_queue)))
                return -1;

        queue = info.hw_queue;

        /*
         * If the interface on which the frame is sent is the P2P_DEVICE
         * or an AP/GO interface, use the broadcast station associated
         * with it; otherwise, if the interface is a managed interface,
         * use the AP station associated with it for multicast traffic
         * (this is not possible for unicast packets, as e.g. a TDLS
         * discovery response is sent without a station entry); otherwise
         * use the AUX station.
         */
        sta_id = mvm->aux_sta.sta_id;
        if (info.control.vif) {
                struct iwl_mvm_vif *mvmvif =
                        iwl_mvm_vif_from_mac80211(info.control.vif);

                if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
                    info.control.vif->type == NL80211_IFTYPE_AP ||
                    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
                        if (!ieee80211_is_data(hdr->frame_control))
                                sta_id = mvmvif->bcast_sta.sta_id;
                        else
                                sta_id = mvmvif->mcast_sta.sta_id;

                        queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
                                                           hdr->frame_control);
                        if (queue < 0)
                                return -1;
                } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
                           is_multicast_ether_addr(hdr->addr1)) {
                        u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);

                        if (ap_sta_id != IWL_MVM_INVALID_STA)
                                sta_id = ap_sta_id;
                } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
                        queue = mvm->snif_queue;
                        sta_id = mvm->snif_sta.sta_id;
                }
        }

        if (unlikely(ieee80211_is_probe_resp(fc)))
                iwl_mvm_probe_resp_set_noa(mvm, skb);

        IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);

        dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
        if (!dev_cmd)
                return -1;

        /* From now on, we cannot access info->control */
        iwl_mvm_skb_prepare_status(skb, dev_cmd);

        if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
                iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
                return -1;
        }

        return 0;
}

#ifdef CONFIG_INET

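/*
 * Segment a GSO skb into MPDUs that each carry at most num_subframes
 * A-MSDU subframes, preserving the original control buffer in every
 * resulting skb.
 */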
static int
iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
                       netdev_features_t netdev_flags,
                       struct sk_buff_head *mpdus_skb)
{
        struct sk_buff *tmp, *next;
        struct ieee80211_hdr *hdr = (void *)skb->data;
        char cb[sizeof(skb->cb)];
        u16 i = 0;
        unsigned int tcp_payload_len;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        bool ipv4 = (skb->protocol == htons(ETH_P_IP));
        u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;

        skb_shinfo(skb)->gso_size = num_subframes * mss;
        memcpy(cb, skb->cb, sizeof(cb));

        next = skb_gso_segment(skb, netdev_flags);
        skb_shinfo(skb)->gso_size = mss;
        if (WARN_ON_ONCE(IS_ERR(next)))
                return -EINVAL;
        else if (next)
                consume_skb(skb);

        while (next) {
                tmp = next;
                next = tmp->next;

                memcpy(tmp->cb, cb, sizeof(tmp->cb));
                /*
                 * Compute the length of all the data added for the A-MSDU.
                 * This will be used to compute the length to write in the TX
                 * command. We have: SNAP + IP + TCP for n - 1 subframes and
                 * ETH header for n subframes.
                 */
                tcp_payload_len = skb_tail_pointer(tmp) -
                        skb_transport_header(tmp) -
                        tcp_hdrlen(tmp) + tmp->data_len;

                if (ipv4)
                        ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);

                if (tcp_payload_len > mss) {
                        skb_shinfo(tmp)->gso_size = mss;
                } else {
                        if (ieee80211_is_data_qos(hdr->frame_control)) {
                                u8 *qc;

                                if (ipv4)
                                        ip_send_check(ip_hdr(tmp));

                                qc = ieee80211_get_qos_ctl((void *)tmp->data);
                                *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
                        }
                        skb_shinfo(tmp)->gso_size = 0;
                }

                tmp->prev = NULL;
                tmp->next = NULL;

                __skb_queue_tail(mpdus_skb, tmp);
                i++;
        }

        return 0;
}

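/*
 * Compute the maximum A-MSDU length for this station/TID: the smaller of
 * the station's advertised limit and what fits in the Tx FIFO serving the
 * TID's access category.
 */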
static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
                                           struct ieee80211_sta *sta,
                                           unsigned int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
        u8 ac = tid_to_mac80211_ac[tid];
        unsigned int txf;
        int lmac = IWL_LMAC_24G_INDEX;

        if (iwl_mvm_is_cdb_supported(mvm) &&
            band == NL80211_BAND_5GHZ)
                lmac = IWL_LMAC_5G_INDEX;

        /* For HE, redirect to the trigger-based FIFOs */
        if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
                ac += 4;

        txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);

        /*
         * Don't send an A-MSDU that will be longer than the TXF.
         * Add a security margin of 256 for the TX command + headers.
         * We also want to have the start of the next packet inside the
         * FIFO to be able to send bursts.
         */
        return min_t(unsigned int, mvmsta->max_amsdu_len,
                     mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
}

static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
                          struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff_head *mpdus_skb)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct ieee80211_hdr *hdr = (void *)skb->data;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
        u16 snap_ip_tcp, pad;
        unsigned int dbg_max_amsdu_len;
        netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
        u8 tid;

        snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
                tcp_hdrlen(skb);

        dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);

        if (!mvmsta->max_amsdu_len ||
            !ieee80211_is_data_qos(hdr->frame_control) ||
            (!mvmsta->amsdu_enabled && !dbg_max_amsdu_len))
                return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

        /*
         * Do not build an A-MSDU for IPv6 with extension headers;
         * ask the stack to segment and checksum the generated MPDUs for us.
         */
        if (skb->protocol == htons(ETH_P_IPV6) &&
            ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
            IPPROTO_TCP) {
                netdev_flags &= ~NETIF_F_CSUM_MASK;
                return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
        }

        tid = ieee80211_get_tid(hdr);
        if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
                return -EINVAL;

        /*
         * No need to lock amsdu_in_ampdu_allowed since it can't be modified
         * during a BA session.
         */
        if (info->flags & IEEE80211_TX_CTL_AMPDU &&
            !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed)
                return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

        if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) ||
            !(mvmsta->amsdu_enabled & BIT(tid)))
                return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

        max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);

        if (unlikely(dbg_max_amsdu_len))
                max_amsdu_len = min_t(unsigned int, max_amsdu_len,
                                      dbg_max_amsdu_len);

        /*
         * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
         * supported. This is a spec requirement (IEEE 802.11-2015
         * section 8.7.3 NOTE 3).
         */
        if (info->flags & IEEE80211_TX_CTL_AMPDU &&
            !sta->vht_cap.vht_supported)
                max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);

        /* Sub frame header + SNAP + IP header + TCP header + MSS */
        subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
        pad = (4 - subf_len) & 0x3;

        /*
         * If we have N subframes in the A-MSDU, then the A-MSDU's size is
         * N * subf_len + (N - 1) * pad.
         */
        num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
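        /*
         * Illustrative example (values are not from the spec): with
         * subf_len = 1462 we get pad = 2, so a max_amsdu_len of 3839
         * yields num_subframes = 3841 / 1464 = 2 subframes per A-MSDU.
         */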

        if (sta->max_amsdu_subframes &&
            num_subframes > sta->max_amsdu_subframes)
                num_subframes = sta->max_amsdu_subframes;

        tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
                tcp_hdrlen(skb) + skb->data_len;

        /*
         * Make sure we have enough TBs for the A-MSDU:
         *      2 for each subframe
         *      1 more for each fragment
         *      1 more for the potential data in the header
         */
        if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
            mvm->trans->max_skb_frags)
                num_subframes = 1;

        if (num_subframes > 1)
                *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

        /* This skb fits in one single A-MSDU */
        if (num_subframes * mss >= tcp_payload_len) {
                __skb_queue_tail(mpdus_skb, skb);
                return 0;
        }

        /*
         * Trick the segmentation function to make it
         * create SKBs that can fit into one A-MSDU.
         */
        return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
                                      mpdus_skb);
}
#else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
                          struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff_head *mpdus_skb)
{
        /* Impossible to get TSO without CONFIG_INET */
        WARN_ON(1);

        return -1;
}
#endif

static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
                                  struct iwl_mvm_sta *mvm_sta, u8 tid,
                                  struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        u8 mac_queue = info->hw_queue;
        struct sk_buff_head *deferred_tx_frames;

        lockdep_assert_held(&mvm_sta->lock);

        mvm_sta->deferred_traffic_tid_map |= BIT(tid);
        set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);

        deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;

        skb_queue_tail(deferred_tx_frames, skb);

        /*
         * The first deferred frame should've stopped the MAC queues, so we
         * should never get a second deferred frame for the RA/TID.
         * In case of GSO the first packet may have been split, so don't warn.
         */
        if (skb_queue_len(deferred_tx_frames) == 1) {
                iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
                schedule_work(&mvm->add_stream_wk);
        }
}

/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
        unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
        unsigned long now = jiffies;
        int tid;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return false;

        for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
                                IWL_MVM_DQA_QUEUE_TIMEOUT, now))
                        return true;
        }

        return false;
}

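/*
 * Account Tx airtime to the per-MAC statistics used by the traffic
 * condition monitor (TCM), and kick the TCM work if a period has elapsed.
 */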
static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
                               struct iwl_mvm_sta *mvmsta,
                               int airtime)
{
        int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
        struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

        if (mvm->tcm.paused)
                return;

        if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
                schedule_delayed_work(&mvm->tcm.work, 0);

        mdata->tx.airtime += airtime;
}

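/* Count a queued Tx packet in the per-MAC, per-AC TCM statistics */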
static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
                                  struct iwl_mvm_sta *mvmsta, int tid)
{
        u32 ac = tid_to_mac80211_ac[tid];
        int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
        struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

        mdata->tx.pkts[ac]++;
}

/*
 * Build the Tx cmd for a single MPDU, pick its queue and hand it to the
 * transport
 */
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                           struct ieee80211_tx_info *info,
                           struct ieee80211_sta *sta)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct iwl_mvm_sta *mvmsta;
        struct iwl_device_cmd *dev_cmd;
        __le16 fc;
        u16 seq_number = 0;
        u8 tid = IWL_MAX_TID_COUNT;
        u16 txq_id = info->hw_queue;
        bool is_ampdu = false;
        int hdrlen;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        fc = hdr->frame_control;
        hdrlen = ieee80211_hdrlen(fc);

        if (WARN_ON_ONCE(!mvmsta))
                return -1;

        if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
                return -1;

        if (unlikely(ieee80211_is_probe_resp(fc)))
                iwl_mvm_probe_resp_set_noa(mvm, skb);

        dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
                                        sta, mvmsta->sta_id);
        if (!dev_cmd)
                goto drop;

        /*
         * we handle that entirely ourselves -- for uAPSD the firmware
         * will always send a notification, and for PS-Poll responses
         * we'll notify mac80211 when getting frame status
         */
        info->flags &= ~IEEE80211_TX_STATUS_EOSP;

        spin_lock(&mvmsta->lock);

        /* nullfunc frames should go to the MGMT queue regardless of QoS;
         * the !ieee80211_is_qos_nullfunc(fc) condition keeps the default
         * assignment of the MGMT TID
         */
        if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
                tid = ieee80211_get_tid(hdr);
                if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
                        goto drop_unlock_sta;

                is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
                if (WARN_ON_ONCE(is_ampdu &&
                                 mvmsta->tid_data[tid].state != IWL_AGG_ON))
                        goto drop_unlock_sta;

                seq_number = mvmsta->tid_data[tid].seq_number;
                seq_number &= IEEE80211_SCTL_SEQ;

                if (!iwl_mvm_has_new_tx_api(mvm)) {
                        struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

                        hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
                        hdr->seq_ctrl |= cpu_to_le16(seq_number);
                        /* update the tx_cmd hdr as it was already copied */
                        tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
                }
        } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) {
                tid = IWL_TID_NON_QOS;
        }

        txq_id = mvmsta->tid_data[tid].txq_id;

        WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

        /* Check if TXQ needs to be allocated or re-activated */
        if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
                iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);

                /*
                 * The frame is now deferred, and the worker scheduled
                 * will re-allocate it, so we can free it for now.
                 */
                iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
                spin_unlock(&mvmsta->lock);
                return 0;
        }

        if (!iwl_mvm_has_new_tx_api(mvm)) {
                /* Keep track of the time of the last frame for this RA/TID */
                mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;

                /*
                 * If we have timed-out TIDs, schedule the worker that will
                 * reconfigure the queues and update them
                 *
                 * Note that the mvm->queue_info_lock isn't being taken here in
                 * order to not serialize the TX flow. This isn't dangerous
                 * because scheduling mvm->add_stream_wk can't ruin the state,
                 * and if we DON'T schedule it due to some race condition then
                 * next TX we get here we will.
                 */
                if (unlikely(mvm->queue_info[txq_id].status ==
                             IWL_MVM_QUEUE_SHARED &&
                             iwl_mvm_txq_should_update(mvm, txq_id)))
                        schedule_work(&mvm->add_stream_wk);
        }

        IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
                     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));

        /* From now on, we cannot access info->control */
        iwl_mvm_skb_prepare_status(skb, dev_cmd);

        if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
                goto drop_unlock_sta;

        if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
                mvmsta->tid_data[tid].seq_number = seq_number + 0x10;

        spin_unlock(&mvmsta->lock);

        iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);

        return 0;

drop_unlock_sta:
        iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
        spin_unlock(&mvmsta->lock);
drop:
        return -1;
}

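/*
 * Station Tx entry point: non-GSO skbs go straight to iwl_mvm_tx_mpdu();
 * GSO skbs are first segmented into A-MSDU-sized MPDUs by
 * iwl_mvm_tx_tso() and then sent one by one.
 */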
int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
                   struct ieee80211_sta *sta)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct ieee80211_tx_info info;
        struct sk_buff_head mpdus_skbs;
        unsigned int payload_len;
        int ret;

        if (WARN_ON_ONCE(!mvmsta))
                return -1;

        if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
                return -1;

        memcpy(&info, skb->cb, sizeof(info));

        if (!skb_is_gso(skb))
                return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

        payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
                tcp_hdrlen(skb) + skb->data_len;

        if (payload_len <= skb_shinfo(skb)->gso_size)
                return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

        __skb_queue_head_init(&mpdus_skbs);

        ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
        if (ret)
                return ret;

        if (WARN_ON(skb_queue_empty(&mpdus_skbs)))
                return ret;

        while (!skb_queue_empty(&mpdus_skbs)) {
                skb = __skb_dequeue(&mpdus_skbs);

                ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
                if (ret) {
                        __skb_queue_purge(&mpdus_skbs);
                        return ret;
                }
        }

        return 0;
}

static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
                                      struct ieee80211_sta *sta, u8 tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
        struct ieee80211_vif *vif = mvmsta->vif;
        u16 normalized_ssn;

        lockdep_assert_held(&mvmsta->lock);

        if ((tid_data->state == IWL_AGG_ON ||
             tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
            iwl_mvm_tid_queued(mvm, tid_data) == 0) {
                /*
                 * Now that this aggregation or DQA queue is empty, tell
                 * mac80211 so it knows we no longer have frames buffered for
                 * the station on this TID (for the TIM bitmap calculation).
                 */
                ieee80211_sta_set_buffered(sta, tid, false);
        }

        /*
         * In 22000 HW, the next_reclaimed index is only 8 bits, so we'll need
         * to align the wrap around of ssn so we compare relevant values.
         */
        normalized_ssn = tid_data->ssn;
        if (mvm->trans->cfg->gen2)
                normalized_ssn &= 0xff;

        if (normalized_ssn != tid_data->next_reclaimed)
                return;

        switch (tid_data->state) {
        case IWL_EMPTYING_HW_QUEUE_ADDBA:
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Can continue addBA flow ssn = next_recl = %d\n",
                                    tid_data->next_reclaimed);
                tid_data->state = IWL_AGG_STARTING;
                ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                break;

        case IWL_EMPTYING_HW_QUEUE_DELBA:
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Can continue DELBA flow ssn = next_recl = %d\n",
                                    tid_data->next_reclaimed);
                tid_data->state = IWL_AGG_OFF;
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                break;

        default:
                break;
        }
}

#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

        switch (status & TX_STATUS_MSK) {
        case TX_STATUS_SUCCESS:
                return "SUCCESS";
        TX_STATUS_POSTPONE(DELAY);
        TX_STATUS_POSTPONE(FEW_BYTES);
        TX_STATUS_POSTPONE(BT_PRIO);
        TX_STATUS_POSTPONE(QUIET_PERIOD);
        TX_STATUS_POSTPONE(CALC_TTAK);
        TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
        TX_STATUS_FAIL(SHORT_LIMIT);
        TX_STATUS_FAIL(LONG_LIMIT);
        TX_STATUS_FAIL(UNDERRUN);
        TX_STATUS_FAIL(DRAIN_FLOW);
        TX_STATUS_FAIL(RFKILL_FLUSH);
        TX_STATUS_FAIL(LIFE_EXPIRE);
        TX_STATUS_FAIL(DEST_PS);
        TX_STATUS_FAIL(HOST_ABORTED);
        TX_STATUS_FAIL(BT_RETRY);
        TX_STATUS_FAIL(STA_INVALID);
        TX_STATUS_FAIL(FRAG_DROPPED);
        TX_STATUS_FAIL(TID_DISABLE);
        TX_STATUS_FAIL(FIFO_FLUSHED);
        TX_STATUS_FAIL(SMALL_CF_POLL);
        TX_STATUS_FAIL(FW_DROP);
        TX_STATUS_FAIL(STA_COLOR_MISMATCH);
        }

        return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */

1342 void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
1343                                enum nl80211_band band,
1344                                struct ieee80211_tx_rate *r)
1345 {
1346         if (rate_n_flags & RATE_HT_MCS_GF_MSK)
1347                 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1348         switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
1349         case RATE_MCS_CHAN_WIDTH_20:
1350                 break;
1351         case RATE_MCS_CHAN_WIDTH_40:
1352                 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1353                 break;
1354         case RATE_MCS_CHAN_WIDTH_80:
1355                 r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
1356                 break;
1357         case RATE_MCS_CHAN_WIDTH_160:
1358                 r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
1359                 break;
1360         }
1361         if (rate_n_flags & RATE_MCS_SGI_MSK)
1362                 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1363         if (rate_n_flags & RATE_MCS_HT_MSK) {
1364                 r->flags |= IEEE80211_TX_RC_MCS;
1365                 r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
1366         } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
1367                 ieee80211_rate_set_vht(
1368                         r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
1369                         ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
1370                                                 RATE_VHT_MCS_NSS_POS) + 1);
1371                 r->flags |= IEEE80211_TX_RC_VHT_MCS;
1372         } else {
1373                 r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
1374                                                              band);
1375         }
1376 }
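/*
 * Illustrative sketch (not compiled; the rate word is hypothetical):
 * decoding an 80 MHz, short-GI, single-stream VHT MCS 5 rate with the
 * helper above.
 *
 *	struct ieee80211_tx_rate r = {};
 *	u32 rnf = RATE_MCS_VHT_MSK | RATE_MCS_CHAN_WIDTH_80 |
 *		  RATE_MCS_SGI_MSK | 5;
 *	iwl_mvm_hwrate_to_tx_rate(rnf, NL80211_BAND_5GHZ, &r);
 *	// r.flags now holds IEEE80211_TX_RC_VHT_MCS |
 *	// IEEE80211_TX_RC_80_MHZ_WIDTH | IEEE80211_TX_RC_SHORT_GI, and
 *	// r encodes MCS 5 with NSS 1 (the NSS bits are zero).
 */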
1377
/**
 * iwl_mvm_hwrate_to_tx_status - translate ucode response to mac80211 tx
 *	status control values
 * @rate_n_flags: the rate and flags reported by the firmware
 * @info: the mac80211 Tx info to fill in
 */
1381 static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
1382                                         struct ieee80211_tx_info *info)
1383 {
1384         struct ieee80211_tx_rate *r = &info->status.rates[0];
1385
1386         info->status.antenna =
1387                 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1388         iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
1389 }
1390
1391 static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
1392                                             u32 status)
1393 {
1394         struct iwl_fw_dbg_trigger_tlv *trig;
1395         struct iwl_fw_dbg_trigger_tx_status *status_trig;
1396         int i;
1397
1398         trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
1399                                      FW_DBG_TRIGGER_TX_STATUS);
1400         if (!trig)
1401                 return;
1402
1403         status_trig = (void *)trig->data;
1404
1405         for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
1406                 /* don't collect on status 0 */
1407                 if (!status_trig->statuses[i].status)
1408                         break;
1409
1410                 if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
1411                         continue;
1412
1413                 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1414                                         "Tx status %d was received",
1415                                         status & TX_STATUS_MSK);
1416                 break;
1417         }
1418 }
1419
1420 /**
1421  * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
 * @mvm: the mvm object
 * @tx_resp: the Tx response from the fw (agg or non-agg)
1423  *
1424  * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
1425  * it can't know that everything will go well until the end of the AMPDU, it
1426  * can't know in advance the number of MPDUs that will be sent in the current
1427  * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
1428  * Hence, it can't know in advance what the SSN of the SCD will be at the end
1429  * of the batch. This is why the SSN of the SCD is written at the end of the
1430  * whole struct at a variable offset. This function knows how to cope with the
1431  * variable offset and returns the SSN of the SCD.
1432  */
1433 static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
1434                                       struct iwl_mvm_tx_resp *tx_resp)
1435 {
1436         return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
1437                             tx_resp->frame_count) & 0xfff;
1438 }
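/*
 * Memory layout behind the read above: each struct agg_tx_status entry is
 * one dword (__le16 status + __le16 sequence), so casting the array to
 * __le32 * and adding frame_count lands exactly on the trailing SSN word:
 *
 *	agg_status[0] .. agg_status[frame_count - 1]	per-frame status
 *	agg_status[frame_count]				le32, low 12 bits = SSN
 */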
1439
1440 static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1441                                      struct iwl_rx_packet *pkt)
1442 {
1443         struct ieee80211_sta *sta;
1444         u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1445         int txq_id = SEQ_TO_QUEUE(sequence);
1446         /* struct iwl_mvm_tx_resp_v3 is almost the same */
1447         struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1448         int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1449         int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1450         struct agg_tx_status *agg_status =
1451                 iwl_mvm_get_agg_status(mvm, tx_resp);
1452         u32 status = le16_to_cpu(agg_status->status);
1453         u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
1454         struct iwl_mvm_sta *mvmsta;
1455         struct sk_buff_head skbs;
1456         u8 skb_freed = 0;
1457         u8 lq_color;
1458         u16 next_reclaimed, seq_ctl;
1459         bool is_ndp = false;
1460
1461         __skb_queue_head_init(&skbs);
1462
1463         if (iwl_mvm_has_new_tx_api(mvm))
1464                 txq_id = le16_to_cpu(tx_resp->tx_queue);
1465
1466         seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
1467
	/* we can free until ssn % q.n_bd, not inclusive */
1469         iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
1470
1471         while (!skb_queue_empty(&skbs)) {
1472                 struct sk_buff *skb = __skb_dequeue(&skbs);
1473                 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1474                 struct ieee80211_hdr *hdr = (void *)skb->data;
1475                 bool flushed = false;
1476
1477                 skb_freed++;
1478
1479                 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1480
1481                 memset(&info->status, 0, sizeof(info->status));
1482
1483                 /* inform mac80211 about what happened with the frame */
1484                 switch (status & TX_STATUS_MSK) {
1485                 case TX_STATUS_SUCCESS:
1486                 case TX_STATUS_DIRECT_DONE:
1487                         info->flags |= IEEE80211_TX_STAT_ACK;
1488                         break;
1489                 case TX_STATUS_FAIL_FIFO_FLUSHED:
1490                 case TX_STATUS_FAIL_DRAIN_FLOW:
1491                         flushed = true;
1492                         break;
1493                 case TX_STATUS_FAIL_DEST_PS:
			/* the FW should have stopped the queue and not
			 * returned this status
			 */
1497                         WARN_ON(1);
1498                         info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1499                         break;
1500                 default:
1501                         break;
1502                 }
1503
		/*
		 * If we are freeing multiple frames, mark all but the first
		 * one as acked, since they were acknowledged before.
		 */
1509                 if (skb_freed > 1)
1510                         info->flags |= IEEE80211_TX_STAT_ACK;
1511
1512                 iwl_mvm_tx_status_check_trigger(mvm, status);
1513
1514                 info->status.rates[0].count = tx_resp->failure_frame + 1;
1515                 iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
1516                                             info);
1517                 info->status.status_driver_data[1] =
1518                         (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
1519
1520                 /* Single frame failure in an AMPDU queue => send BAR */
1521                 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1522                     !(info->flags & IEEE80211_TX_STAT_ACK) &&
1523                     !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
1524                         info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1525                 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1526
		/*
		 * Workaround for a FW bug: seq_ctl is wrong upon failure and
		 * for BAR frames
		 */
1528                 if (ieee80211_is_back_req(hdr->frame_control))
1529                         seq_ctl = 0;
1530                 else if (status != TX_STATUS_SUCCESS)
1531                         seq_ctl = le16_to_cpu(hdr->seq_ctrl);
1532
		if (unlikely(!seq_ctl)) {
			/*
			 * If it is an NDP, we can't update next_reclaimed
			 * since its sequence control is 0. Note that for
			 * that same reason, NDPs are never sent to
			 * A-MPDU'able queues, so we can never have more than
			 * one freed frame for a single Tx response (see the
			 * WARN_ON below).
			 */
			if (ieee80211_is_qos_nullfunc(hdr->frame_control))
				is_ndp = true;
		}
1546
1547                 /*
1548                  * TODO: this is not accurate if we are freeing more than one
1549                  * packet.
1550                  */
1551                 info->status.tx_time =
1552                         le16_to_cpu(tx_resp->wireless_media_time);
1553                 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
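		/* pack the LQ table color and the reduced tx power into one
		 * pointer-sized driver-data slot for rate scaling to consume
		 */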
1554                 lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
1555                 info->status.status_driver_data[0] =
1556                         RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
1557
1558                 ieee80211_tx_status(mvm->hw, skb);
1559         }
1560
	/* This is an aggregation queue or might become one, so we use
	 * the ssn since: ssn = wifi seq_num % 256.
	 * The seq_ctl is the sequence control of the packet to which
	 * this Tx response relates. But if there is a hole in the
	 * bitmap of the BA we received, this Tx response may allow us
	 * to reclaim the hole and all the subsequent packets that were
	 * already acked. In that case, seq_ctl != ssn, and the next
	 * packet to be reclaimed will be ssn and not seq_ctl. In that
	 * case, several packets will be reclaimed even if
	 * frame_count = 1.
	 *
	 * The ssn is the index (% 256) of the latest packet that was
	 * treated (acked / dropped), plus 1.
	 */
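	/*
	 * Example (hypothetical numbers): packets 10..15 were in flight and
	 * the BA acked 11..15 but left a hole at 10. Once the retransmitted
	 * packet 10 is finally treated, seq_ctl still relates to packet 10
	 * while ssn is already 16, so 10..15 are reclaimed at once even
	 * though frame_count = 1.
	 */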
1575         next_reclaimed = ssn;
1576
1577         IWL_DEBUG_TX_REPLY(mvm,
1578                            "TXQ %d status %s (0x%08x)\n",
1579                            txq_id, iwl_mvm_get_tx_fail_reason(status), status);
1580
1581         IWL_DEBUG_TX_REPLY(mvm,
1582                            "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
1583                            le32_to_cpu(tx_resp->initial_rate),
1584                            tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
1585                            ssn, next_reclaimed, seq_ctl);
1586
1587         rcu_read_lock();
1588
1589         sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1590         /*
1591          * sta can't be NULL otherwise it'd mean that the sta has been freed in
1592          * the firmware while we still have packets for it in the Tx queues.
1593          */
1594         if (WARN_ON_ONCE(!sta))
1595                 goto out;
1596
1597         if (!IS_ERR(sta)) {
1598                 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1599
1600                 iwl_mvm_tx_airtime(mvm, mvmsta,
1601                                    le16_to_cpu(tx_resp->wireless_media_time));
1602
1603                 if (sta->wme && tid != IWL_MGMT_TID) {
1604                         struct iwl_mvm_tid_data *tid_data =
1605                                 &mvmsta->tid_data[tid];
1606                         bool send_eosp_ndp = false;
1607
1608                         spin_lock_bh(&mvmsta->lock);
1609
1610                         if (!is_ndp) {
1611                                 tid_data->next_reclaimed = next_reclaimed;
1612                                 IWL_DEBUG_TX_REPLY(mvm,
1613                                                    "Next reclaimed packet:%d\n",
1614                                                    next_reclaimed);
1615                         } else {
1616                                 IWL_DEBUG_TX_REPLY(mvm,
1617                                                    "NDP - don't update next_reclaimed\n");
1618                         }
1619
1620                         iwl_mvm_check_ratid_empty(mvm, sta, tid);
1621
1622                         if (mvmsta->sleep_tx_count) {
1623                                 mvmsta->sleep_tx_count--;
1624                                 if (mvmsta->sleep_tx_count &&
1625                                     !iwl_mvm_tid_queued(mvm, tid_data)) {
1626                                         /*
1627                                          * The number of frames in the queue
1628                                          * dropped to 0 even if we sent less
1629                                          * frames than we thought we had on the
1630                                          * Tx queue.
1631                                          * This means we had holes in the BA
1632                                          * window that we just filled, ask
1633                                          * mac80211 to send EOSP since the
1634                                          * firmware won't know how to do that.
1635                                          * Send NDP and the firmware will send
1636                                          * EOSP notification that will trigger
1637                                          * a call to ieee80211_sta_eosp().
1638                                          */
1639                                         send_eosp_ndp = true;
1640                                 }
1641                         }
1642
1643                         spin_unlock_bh(&mvmsta->lock);
1644                         if (send_eosp_ndp) {
1645                                 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
1646                                         IEEE80211_FRAME_RELEASE_UAPSD,
1647                                         1, tid, false, false);
1648                                 mvmsta->sleep_tx_count = 0;
1649                                 ieee80211_send_eosp_nullfunc(sta, tid);
1650                         }
1651                 }
1652
1653                 if (mvmsta->next_status_eosp) {
1654                         mvmsta->next_status_eosp = false;
1655                         ieee80211_sta_eosp(sta);
1656                 }
1657         } else {
1658                 mvmsta = NULL;
1659         }
1660
1661 out:
1662         rcu_read_unlock();
1663 }
1664
1665 #ifdef CONFIG_IWLWIFI_DEBUG
1666 #define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
1667 static const char *iwl_get_agg_tx_status(u16 status)
1668 {
1669         switch (status & AGG_TX_STATE_STATUS_MSK) {
1670         AGG_TX_STATE_(TRANSMITTED);
1671         AGG_TX_STATE_(UNDERRUN);
1672         AGG_TX_STATE_(BT_PRIO);
1673         AGG_TX_STATE_(FEW_BYTES);
1674         AGG_TX_STATE_(ABORT);
1675         AGG_TX_STATE_(TX_ON_AIR_DROP);
1676         AGG_TX_STATE_(LAST_SENT_TRY_CNT);
1677         AGG_TX_STATE_(LAST_SENT_BT_KILL);
1678         AGG_TX_STATE_(SCD_QUERY);
1679         AGG_TX_STATE_(TEST_BAD_CRC32);
1680         AGG_TX_STATE_(RESPONSE);
1681         AGG_TX_STATE_(DUMP_TX);
1682         AGG_TX_STATE_(DELAY_TX);
1683         }
1684
1685         return "UNKNOWN";
1686 }
1687
1688 static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1689                                       struct iwl_rx_packet *pkt)
1690 {
1691         struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1692         struct agg_tx_status *frame_status =
1693                 iwl_mvm_get_agg_status(mvm, tx_resp);
1694         int i;
1695
1696         for (i = 0; i < tx_resp->frame_count; i++) {
1697                 u16 fstatus = le16_to_cpu(frame_status[i].status);
1698
1699                 IWL_DEBUG_TX_REPLY(mvm,
1700                                    "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
1701                                    iwl_get_agg_tx_status(fstatus),
1702                                    fstatus & AGG_TX_STATE_STATUS_MSK,
1703                                    (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
1704                                         AGG_TX_STATE_TRY_CNT_POS,
1705                                    le16_to_cpu(frame_status[i].sequence));
1706         }
1707 }
1708 #else
1709 static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1710                                       struct iwl_rx_packet *pkt)
1711 {}
1712 #endif /* CONFIG_IWLWIFI_DEBUG */
1713
1714 static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
1715                                   struct iwl_rx_packet *pkt)
1716 {
1717         struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1718         int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1719         int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1720         u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1721         struct iwl_mvm_sta *mvmsta;
1722         int queue = SEQ_TO_QUEUE(sequence);
1723         struct ieee80211_sta *sta;
1724
1725         if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
1726                          (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
1727                 return;
1728
1729         iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
1730
1731         rcu_read_lock();
1732
1733         mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
1734
1735         sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1736         if (WARN_ON_ONCE(!sta || !sta->wme)) {
1737                 rcu_read_unlock();
1738                 return;
1739         }
1740
1741         if (!WARN_ON_ONCE(!mvmsta)) {
1742                 mvmsta->tid_data[tid].rate_n_flags =
1743                         le32_to_cpu(tx_resp->initial_rate);
1744                 mvmsta->tid_data[tid].tx_time =
1745                         le16_to_cpu(tx_resp->wireless_media_time);
1746                 mvmsta->tid_data[tid].lq_color =
1747                         TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
1748                 iwl_mvm_tx_airtime(mvm, mvmsta,
1749                                    le16_to_cpu(tx_resp->wireless_media_time));
1750         }
1751
1752         rcu_read_unlock();
1753 }
1754
1755 void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1756 {
1757         struct iwl_rx_packet *pkt = rxb_addr(rxb);
1758         struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1759
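	/* A single-frame response carries its status inline; an aggregated
	 * response carries a per-frame status array and is handled
	 * separately.
	 */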
1760         if (tx_resp->frame_count == 1)
1761                 iwl_mvm_rx_tx_cmd_single(mvm, pkt);
1762         else
1763                 iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
1764 }
1765
1766 static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
1767                                int txq, int index,
1768                                struct ieee80211_tx_info *ba_info, u32 rate)
1769 {
1770         struct sk_buff_head reclaimed_skbs;
1771         struct iwl_mvm_tid_data *tid_data;
1772         struct ieee80211_sta *sta;
1773         struct iwl_mvm_sta *mvmsta;
1774         struct sk_buff *skb;
1775         int freed;
1776
1777         if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
1778                       tid > IWL_MAX_TID_COUNT,
		      "sta_id %d tid %d\n", sta_id, tid))
1780                 return;
1781
1782         rcu_read_lock();
1783
1784         sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1785
	/* Reclaiming frames for a station that has been deleted? */
1787         if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
1788                 rcu_read_unlock();
1789                 return;
1790         }
1791
1792         mvmsta = iwl_mvm_sta_from_mac80211(sta);
1793         tid_data = &mvmsta->tid_data[tid];
1794
1795         if (tid_data->txq_id != txq) {
1796                 IWL_ERR(mvm,
1797                         "invalid BA notification: Q %d, tid %d\n",
1798                         tid_data->txq_id, tid);
1799                 rcu_read_unlock();
1800                 return;
1801         }
1802
1803         spin_lock_bh(&mvmsta->lock);
1804
1805         __skb_queue_head_init(&reclaimed_skbs);
1806
1807         /*
1808          * Release all TFDs before the SSN, i.e. all TFDs in front of
1809          * block-ack window (we assume that they've been successfully
1810          * transmitted ... if not, it's too late anyway).
1811          */
1812         iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
1813
1814         tid_data->next_reclaimed = index;
1815
1816         iwl_mvm_check_ratid_empty(mvm, sta, tid);
1817
1818         freed = 0;
1819
1820         /* pack lq color from tid_data along the reduced txp */
1821         ba_info->status.status_driver_data[0] =
1822                 RS_DRV_DATA_PACK(tid_data->lq_color,
1823                                  ba_info->status.status_driver_data[0]);
1824         ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
1825
1826         skb_queue_walk(&reclaimed_skbs, skb) {
1827                 struct ieee80211_hdr *hdr = (void *)skb->data;
1828                 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1829
1830                 if (ieee80211_is_data_qos(hdr->frame_control))
1831                         freed++;
1832                 else
1833                         WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
1834
1835                 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1836
1837                 memset(&info->status, 0, sizeof(info->status));
		/* The packet was transmitted successfully; failures come as
		 * single frames because, before failing a frame, the firmware
		 * transmits it without aggregation at least once.
		 */
1842                 info->flags |= IEEE80211_TX_STAT_ACK;
1843
		/* This is the first skb we deliver in this batch;
		 * put the rate scaling data there.
		 */
1846                 if (freed == 1) {
1847                         info->flags |= IEEE80211_TX_STAT_AMPDU;
1848                         memcpy(&info->status, &ba_info->status,
1849                                sizeof(ba_info->status));
1850                         iwl_mvm_hwrate_to_tx_status(rate, info);
1851                 }
1852         }
1853
1854         spin_unlock_bh(&mvmsta->lock);
1855
	/* We got a BA notif with 0 acked or scd_ssn didn't progress, which is
	 * possible (i.e. the first MPDU in the aggregation wasn't acked).
	 * Still, it's important to update RS about sent vs. acked.
	 */
1860         if (skb_queue_empty(&reclaimed_skbs)) {
1861                 struct ieee80211_chanctx_conf *chanctx_conf = NULL;
1862
1863                 if (mvmsta->vif)
1864                         chanctx_conf =
1865                                 rcu_dereference(mvmsta->vif->chanctx_conf);
1866
1867                 if (WARN_ON_ONCE(!chanctx_conf))
1868                         goto out;
1869
1870                 ba_info->band = chanctx_conf->def.chan->band;
1871                 iwl_mvm_hwrate_to_tx_status(rate, ba_info);
1872
1873                 if (!iwl_mvm_has_tlc_offload(mvm)) {
1874                         IWL_DEBUG_TX_REPLY(mvm,
1875                                            "No reclaim. Update rs directly\n");
1876                         iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
1877                 }
1878         }
1879
1880 out:
1881         rcu_read_unlock();
1882
1883         while (!skb_queue_empty(&reclaimed_skbs)) {
1884                 skb = __skb_dequeue(&reclaimed_skbs);
1885                 ieee80211_tx_status(mvm->hw, skb);
1886         }
1887 }
1888
1889 void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1890 {
1891         struct iwl_rx_packet *pkt = rxb_addr(rxb);
1892         int sta_id, tid, txq, index;
1893         struct ieee80211_tx_info ba_info = {};
1894         struct iwl_mvm_ba_notif *ba_notif;
1895         struct iwl_mvm_tid_data *tid_data;
1896         struct iwl_mvm_sta *mvmsta;
1897
1898         ba_info.flags = IEEE80211_TX_STAT_AMPDU;
1899
1900         if (iwl_mvm_has_new_tx_api(mvm)) {
1901                 struct iwl_mvm_compressed_ba_notif *ba_res =
1902                         (void *)pkt->data;
1903                 u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
1904                 int i;
1905
1906                 sta_id = ba_res->sta_id;
1907                 ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
1908                 ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
1909                 ba_info.status.tx_time =
1910                         (u16)le32_to_cpu(ba_res->wireless_time);
1911                 ba_info.status.status_driver_data[0] =
1912                         (void *)(uintptr_t)ba_res->reduced_txp;
1913
1914                 if (!le16_to_cpu(ba_res->tfd_cnt))
1915                         goto out;
1916
1917                 rcu_read_lock();
1918
1919                 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
1920                 if (!mvmsta)
1921                         goto out_unlock;
1922
1923                 /* Free per TID */
1924                 for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
1925                         struct iwl_mvm_compressed_ba_tfd *ba_tfd =
1926                                 &ba_res->tfd[i];
1927
1928                         tid = ba_tfd->tid;
1929                         if (tid == IWL_MGMT_TID)
1930                                 tid = IWL_MAX_TID_COUNT;
1931
			/* index by TID: the TFD entries are not guaranteed
			 * to arrive sorted by TID
			 */
			mvmsta->tid_data[tid].lq_color = lq_color;
1933                         iwl_mvm_tx_reclaim(mvm, sta_id, tid,
1934                                            (int)(le16_to_cpu(ba_tfd->q_num)),
1935                                            le16_to_cpu(ba_tfd->tfd_index),
1936                                            &ba_info,
1937                                            le32_to_cpu(ba_res->tx_rate));
1938                 }
1939
1940                 iwl_mvm_tx_airtime(mvm, mvmsta,
1941                                    le32_to_cpu(ba_res->wireless_time));
1942 out_unlock:
1943                 rcu_read_unlock();
1944 out:
1945                 IWL_DEBUG_TX_REPLY(mvm,
1946                                    "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
1947                                    sta_id, le32_to_cpu(ba_res->flags),
1948                                    le16_to_cpu(ba_res->txed),
1949                                    le16_to_cpu(ba_res->done));
1950                 return;
1951         }
1952
1953         ba_notif = (void *)pkt->data;
1954         sta_id = ba_notif->sta_id;
1955         tid = ba_notif->tid;
1956         /* "flow" corresponds to Tx queue */
1957         txq = le16_to_cpu(ba_notif->scd_flow);
1958         /* "ssn" is start of block-ack Tx window, corresponds to index
1959          * (in Tx queue's circular buffer) of first TFD/frame in window */
1960         index = le16_to_cpu(ba_notif->scd_ssn);
1961
1962         rcu_read_lock();
1963         mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
1964         if (WARN_ON_ONCE(!mvmsta)) {
1965                 rcu_read_unlock();
1966                 return;
1967         }
1968
1969         tid_data = &mvmsta->tid_data[tid];
1970
1971         ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
1972         ba_info.status.ampdu_len = ba_notif->txed;
1973         ba_info.status.tx_time = tid_data->tx_time;
1974         ba_info.status.status_driver_data[0] =
1975                 (void *)(uintptr_t)ba_notif->reduced_txp;
1976
1977         rcu_read_unlock();
1978
1979         iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
1980                            tid_data->rate_n_flags);
1981
1982         IWL_DEBUG_TX_REPLY(mvm,
1983                            "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
1984                            ba_notif->sta_addr, ba_notif->sta_id);
1985
1986         IWL_DEBUG_TX_REPLY(mvm,
1987                            "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
1988                            ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
1989                            le64_to_cpu(ba_notif->bitmap), txq, index,
1990                            ba_notif->txed, ba_notif->txed_2_done);
1991
1992         IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
1993                            ba_notif->reduced_txp);
1994 }
1995
1996 /*
1997  * Note that there are transports that buffer frames before they reach
1998  * the firmware. This means that after flush_tx_path is called, the
1999  * queue might not be empty. The race-free way to handle this is to:
2000  * 1) set the station as draining
2001  * 2) flush the Tx path
2002  * 3) wait for the transport queues to be empty
2003  */
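/*
 * Illustrative sketch (hypothetical caller, not part of this file) of that
 * sequence, using helpers defined elsewhere in the driver:
 *
 *	iwl_mvm_drain_sta(mvm, mvmsta, true);		// 1) mark as draining
 *	iwl_mvm_flush_sta(mvm, mvmsta, false, 0);	// 2) flush the Tx path
 *	iwl_trans_wait_tx_queues_empty(mvm->trans,	// 3) wait for empty
 *				       mvmsta->tfd_queue_msk);
 *	iwl_mvm_drain_sta(mvm, mvmsta, false);
 */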
2004 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
2005 {
2006         int ret;
2007         struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
2008                 .queues_ctl = cpu_to_le32(tfd_msk),
2009                 .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
2010         };
2011
2012         WARN_ON(iwl_mvm_has_new_tx_api(mvm));
2013
2014         ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
2015                                    sizeof(flush_cmd), &flush_cmd);
2016         if (ret)
2017                 IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
2018         return ret;
2019 }
2020
2021 int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
2022                            u16 tids, u32 flags)
2023 {
2024         int ret;
2025         struct iwl_tx_path_flush_cmd flush_cmd = {
2026                 .sta_id = cpu_to_le32(sta_id),
2027                 .tid_mask = cpu_to_le16(tids),
2028         };
2029
2030         WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2031
2032         ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
2033                                    sizeof(flush_cmd), &flush_cmd);
2034         if (ret)
2035                 IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
2036         return ret;
2037 }
2038
2039 int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
2040 {
2041         struct iwl_mvm_int_sta *int_sta = sta;
2042         struct iwl_mvm_sta *mvm_sta = sta;
2043
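	/* Both station types keep sta_id at the same offset, so the caller
	 * may pass either one through the void pointer; the check below
	 * enforces that invariant at compile time.
	 */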
2044         BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) !=
2045                      offsetof(struct iwl_mvm_sta, sta_id));
2046
2047         if (iwl_mvm_has_new_tx_api(mvm))
2048                 return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
2049                                               0xff | BIT(IWL_MGMT_TID), flags);
2050
2051         if (internal)
2052                 return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
2053                                              flags);
2054
2055         return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
2056 }