/******************************************************************************
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
#include <linux/pm_runtime.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "fw/api/tx.h"
 * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
		if (!trans_pcie->txq[txq_id])
		iwl_pcie_gen2_txq_unmap(trans, txq_id);
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
				   struct iwl_txq *txq, u16 byte_cnt,
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM - 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched.
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		/* Starting from 22560, the HW expects bytes */
		WARN_ON(trans_pcie->bc_table_dword);
		WARN_ON(len > 0x3FFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
		/* Until 22560, the HW expects DW */
		WARN_ON(!trans_pcie->bc_table_dword);
		len = DIV_ROUND_UP(len, 4);
		WARN_ON(len > 0xFFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
 * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
	lockdep_assert_held(&txq->lock);
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
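
/* The TB count lives in the low 5 bits of the TFD's num_tbs field */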
static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
				    struct iwl_tfh_tfd *tfd)
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
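
/*
 * Undo the DMA mappings of a TFD: every TB except TB0 is unmapped,
 * either as a page or as a single buffer, depending on how it was
 * recorded in the command meta data.
 */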
static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
				    struct iwl_cmd_meta *meta,
				    struct iwl_tfh_tfd *tfd)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
	if (num_tbs > trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				le64_to_cpu(tfd->tbs[i].addr),
				le16_to_cpu(tfd->tbs[i].tb_len),
			dma_unmap_single(trans->dev,
				le64_to_cpu(tfd->tbs[i].addr),
				le16_to_cpu(tfd->tbs[i].tb_len),
static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
	lockdep_assert_held(&txq->lock);
	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
				iwl_pcie_get_tfd(trans, txq, idx));
	skb = txq->entries[idx].skb;
	/* Can be called from irqs-disabled context
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb
	iwl_op_mode_free_skb(trans->op_mode, skb);
	txq->entries[idx].skb = NULL;
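
/*
 * Append one transfer buffer (DMA address + length) to a TFD: the
 * slot index is taken from the TFD's current TB count, which is then
 * incremented.
 */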
static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
				struct iwl_tfh_tfd *tfd, dma_addr_t addr,
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb;
	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->max_tbs);
	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);
	tfd->num_tbs = cpu_to_le16(idx + 1);
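
/*
 * Build the A-MSDU subframes of a GSO skb: the subframe headers (DA,
 * SA, length, SNAP/IP/TCP) are written into a per-queue header page
 * and mapped as one TB, while the payload produced by the TSO core is
 * mapped chunk by chunk as further TBs.
 */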
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
				     struct iwl_tfh_tfd *tfd, int start_len,
				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	/* if the packet is protected, then it must be CCMP or GCMP */
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);
	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room);
	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	skb_pull(skb, hdr_len + iv_len);
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
	tso_start(skb, &tso);
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		u8 *subf_hdrs_start = hdr_page->pos;
		total_len -= data_left;
		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;
		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);
		 * This will copy the SNAP as well which will be considered
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
		hdr_page->pos += snap_ip_tcp_hdrlen;
		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			dev_kfree_skb(csum_skb);
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;
		/* put the payload */
			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
			tso_build_data(skb, &tso, tb_len);
	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);
iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
					  struct iwl_device_cmd *dev_cmd,
					  struct iwl_cmd_meta *out_meta,
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	/* do not align A-MSDU to dword as the subframe header aligns it */
	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
	if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
				      len + IWL_FIRST_TB_SIZE,
	/* building the A-MSDU might have changed this data, memcpy it now */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
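
/*
 * Map every non-empty paged fragment of the skb as its own TB and
 * record it in out_meta->tbs so it can be unmapped when the TFD is
 * freed.
 */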
static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
				      struct iwl_tfh_tfd *tfd,
				      struct iwl_cmd_meta *out_meta)
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		if (!skb_frag_size(frag))
		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
					      skb_frag_size(frag));
		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
					skb_frag_address(frag),
					skb_frag_size(frag));
		out_meta->tbs |= BIT(tb_idx);
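
/*
 * Regular (non-A-MSDU) TX path: TB0 is the bi-directional scratch
 * buffer, TB1 covers the rest of the TX command plus the 802.11
 * header, TB2 maps the remainder of the skb head, and any paged
 * fragments are added after that.
 */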
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
				    struct iwl_device_cmd *dev_cmd,
				    struct iwl_cmd_meta *out_meta,
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	int len, tb1_len, tb2_len;
	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
		tb1_len = ALIGN(len, 4);
	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;
		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
	if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
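
/*
 * Entry point for building a TFD: pick the gen2 or gen3 TX command
 * size by device family and take the A-MSDU path only for GSO frames
 * that carry the QoS A-MSDU-present bit.
 */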
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
					    struct iwl_device_cmd *dev_cmd,
					    struct iwl_cmd_meta *out_meta)
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	/* There must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
	memset(tfd, 0, sizeof(*tfd));
	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
		len = sizeof(struct iwl_tx_cmd_gen2);
		len = sizeof(struct iwl_tx_cmd_gen3);
	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
	 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
	 * built in the higher layers already.
	if (amsdu && skb_shinfo(skb)->gso_size)
		return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
						    out_meta, hdr_len, len);
	return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
				      hdr_len, len, !amsdu);
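
/*
 * Data-path TX for gen2 devices: build the TFD, fill the byte-count
 * table entry and hand the new write pointer to the hardware; when
 * the queue runs out of space the frame is parked on the overflow
 * queue instead of being put on the ring.
 */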
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
	spin_lock(&txq->lock);
	if (iwl_queue_space(trans, txq) < txq->high_mark) {
		iwl_stop_queue(trans, txq);
		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
			struct iwl_device_cmd **dev_cmd_ptr;
			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans_pcie->dev_cmd_offs);
			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);
			spin_unlock(&txq->lock);
	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;
	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
		spin_unlock(&txq->lock);
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
			(void *)dev_cmd->payload;
		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
			(void *)dev_cmd->payload;
		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
				      iwl_pcie_gen2_get_num_tbs(trans, tfd));
	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr) {
			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
		iwl_trans_ref(trans);
	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	spin_unlock(&txq->lock);
/*************** HOST COMMAND QUEUE FUNCTIONS *****/

 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
				      struct iwl_host_cmd *cmd)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd;
	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);
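	/*
	 * First pass over the chunks: work out how many bytes must be
	 * copied into the command buffer (copy_size) and the total
	 * command size (cmd_size), while validating the NOCOPY/DUP
	 * flags.
	 */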
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];
		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;
			if (copy > cmdlen[i])
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			/* only allowed once */
			if (WARN_ON(dup_buf)) {
			dup_buf = kmemdup(cmddata[i], cmdlen[i],
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
			copy_size += cmdlen[i];
		cmd_size += cmd->len[i];
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
	spin_lock_bh(&txq->lock);
	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
	memset(tfd, 0, sizeof(*tfd));
	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);
		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;
	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(txq->write_ptr));
	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);
	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;
			if (copy > cmd->len[i])
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
	iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   (u8 *)out_cmd + tb0_size,
					   copy_size - tb0_size,
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
				     copy_size - tb0_size);
	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;
	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	/* Increment and update queue's write index */
	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	spin_unlock_bh(&txq->lock);
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
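
/*
 * Synchronous host command: enqueue the command and sleep on
 * wait_command_queue until STATUS_SYNC_HCMD_ACTIVE is cleared or
 * HOST_COMPLETE_TIMEOUT expires, handling FW error, RFKILL and
 * missing-response cases.
 */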
static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
		 "Command %s: a command is already active!\n", cmd_str))
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
					 pm_runtime_active(&trans_pcie->pci_dev->dev),
					 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
	cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
				 HOST_COMPLETE_TIMEOUT);
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
		iwl_trans_pcie_sync_nmi(trans);
	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
	if (cmd->flags & CMD_WANT_SKB) {
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	if (cmd->resp_pkt) {
		cmd->resp_pkt = NULL;
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd)
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
	if (cmd->flags & CMD_ASYNC) {
		/* An asynchronous command can not expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
	return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
 * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);
		if (txq_id != trans_pcie->cmd_queue) {
			int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;
			if (WARN_ON_ONCE(!skb))
			iwl_pcie_free_tso_page(trans_pcie, skb);
		iwl_pcie_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;
			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
				iwl_trans_unref(trans);
			} else if (trans_pcie->ref_cmd_in_flight) {
				trans_pcie->ref_cmd_in_flight = false;
				IWL_DEBUG_RPM(trans,
					      "clear ref_cmd_in_flight\n");
				iwl_trans_unref(trans);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
		iwl_op_mode_free_skb(trans->op_mode, skb);
	spin_unlock_bh(&txq->lock);
	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
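
/*
 * Free the DMA memory backing a TX queue: the TFD ring, the TB0
 * scratch buffers, the entries array and the byte-count table.
 */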
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	/* De-alloc circular buffer of TFDs */
		dma_free_coherent(dev,
				  trans_pcie->tfd_size * txq->n_window,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	kfree(txq->entries);
	iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
 * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 * Empty queue by removing and destroying all BD's.
 * 0-fill, but do not free "txq" descriptor structure.
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	iwl_pcie_gen2_txq_unmap(trans, txq_id);
	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
	del_timer_sync(&txq->stuck_timer);
	iwl_pcie_gen2_txq_free_memory(trans, txq);
	trans_pcie->txq[txq_id] = NULL;
	clear_bit(txq_id, trans_pcie->queue_used);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout)
	struct iwl_txq *txq;
	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
				     (trans->cfg->device_family >=
				      IWL_DEVICE_FAMILY_22560) ?
				     sizeof(struct iwl_gen3_bc_tbl) :
				     sizeof(struct iwlagn_scd_bc_tbl));
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
	ret = iwl_pcie_txq_alloc(trans, txq, size, false);
		IWL_ERR(trans, "Tx queue alloc failed\n");
	ret = iwl_pcie_txq_init(trans, txq, size, false);
		IWL_ERR(trans, "Tx queue init failed\n");
	txq->wd_timeout = msecs_to_jiffies(timeout);
	iwl_pcie_gen2_txq_free_memory(trans, txq);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue_cfg_rsp *rsp;
	if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
		goto error_free_resp;
	rsp = (void *)hcmd->resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);
	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		goto error_free_resp;
	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		goto error_free_resp;
	trans_pcie->txq[qid] = txq;
	wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
	iwl_free_resp(hcmd);
	iwl_free_resp(hcmd);
	iwl_pcie_gen2_txq_free_memory(trans, txq);
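
/*
 * Allocate a TX queue dynamically: set up its DMA resources, send the
 * queue configuration command (cmd_id) with CMD_WANT_SKB and let the
 * response handler register the queue under the id returned by the
 * firmware.
 */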
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout)
	struct iwl_txq *txq = NULL;
	struct iwl_tx_queue_cfg_cmd cmd = {
	struct iwl_host_cmd hcmd = {
		.len = { sizeof(cmd) },
		.flags = CMD_WANT_SKB,
	ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout);
	cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
	ret = iwl_trans_send_cmd(trans, &hcmd);
	return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd);
	iwl_pcie_gen2_txq_free_memory(trans, txq);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
	iwl_pcie_gen2_txq_unmap(trans, queue);
	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
		if (!trans_pcie->txq[i])
		iwl_pcie_gen2_txq_free(trans, i);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *queue;
	/* alloc and init the tx queue */
	if (!trans_pcie->txq[txq_id]) {
		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
			IWL_ERR(trans, "Not enough memory for tx queue\n");
		trans_pcie->txq[txq_id] = queue;
		ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
		queue = trans_pcie->txq[txq_id];
	ret = iwl_pcie_txq_init(trans, queue, queue_size,
				(txq_id == trans_pcie->cmd_queue));
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
	trans_pcie->txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans_pcie->queue_used);
	iwl_pcie_gen2_tx_free(trans);