/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
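
/* Firmware tracks per-peer, per-TID tx queue depth as a compact 8-bit
 * (exponent, factor) pair laid out by the HTT_TX_Q_STATE_ENTRY_* fields,
 * i.e. roughly bytes = factor * 128 * 8^exp, saturating at 0xff. The
 * helper below derives that encoding from a raw byte count.
 */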
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}
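
/* Recompute the advertised queue depth of a single txq and store it in the
 * DMA-visible tx_q_state buffer. This only matters in push-pull mode where
 * the firmware reads these counters to decide which queues to fetch from.
 */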
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
		   peer_id, tid, count);
}
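
/* Bump the sequence number and flush the queue state to the device so the
 * firmware notices the counters updated by __ath10k_htt_tx_txq_recalc().
 */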
static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}
void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}
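
/* Tx descriptor accounting: mac80211 queues are stopped with
 * ATH10K_TX_PAUSE_Q_FULL once the pool of tx slots is exhausted and are
 * restarted as soon as a slot frees up again.
 */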
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}
int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}
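
/* HTT tx descriptors (struct ath10k_htt_txbuf) live in a single contiguous
 * DMA-coherent array indexed by msdu_id, so a descriptor's bus address can
 * be derived from its id without any per-packet mapping.
 */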
static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr)
		return;

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);
	htt->txbuf.vaddr = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr,
					      GFP_KERNEL);
	if (!htt->txbuf.vaddr)
		return -ENOMEM;

	return 0;
}
static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_32)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_32,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_32) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_64)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_64,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_64) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}
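
/* The tx_q_state buffer set up below is the host/firmware shared queue
 * depth state used for pull-push tx flow control. It is only allocated
 * when the running firmware advertises ATH10K_FW_FEATURE_PEER_FLOW_CONTROL.
 */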
static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}
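
/* txdone_fifo stages tx completion reports coming in from the firmware so
 * they can be processed outside the rx indication path; it must be drained
 * by the time it is freed.
 */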
static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
	int ret;
	size_t size;

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);

	return ret;
}
static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ret = ath10k_htt_tx_alloc_cont_txbuf(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
		return ret;
	}

	ret = htt->tx_ops->htt_alloc_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	htt->tx_ops->htt_free_frag_desc(htt);

free_txbuf:
	ath10k_htt_tx_free_cont_txbuf(htt);

	return ret;
}
int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	if (htt->tx_mem_allocated)
		return 0;

	ret = ath10k_htt_tx_alloc_buf(htt);
	if (ret)
		goto free_idr_pending_tx;

	htt->tx_mem_allocated = true;

	return 0;

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
	if (!htt->tx_mem_allocated)
		return;

	ath10k_htt_tx_free_cont_txbuf(htt);
	ath10k_htt_tx_free_txq(htt);
	htt->tx_ops->htt_free_frag_desc(htt);
	ath10k_htt_tx_free_txdone_fifo(htt);
	htt->tx_mem_allocated = false;
}
void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_stop(htt);
	ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support
	 */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
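
/* The frag desc bank config message tells the firmware where the host keeps
 * its contiguous array of MSDU extension (fragment) descriptors so the
 * target can index it by msdu_id just like the host does.
 */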
static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg32 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg32;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg64 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg64;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
	cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
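
/* Rx descriptor field offsets are reported to the firmware in units of
 * 4-byte words, hence the division by 4 in desc_offset() below.
 */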
static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
{
	struct htt_rx_ring_setup_ring32 *ring =
			(struct htt_rx_ring_setup_ring32 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}
static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
{
	struct htt_rx_ring_setup_ring64 *ring =
			(struct htt_rx_ring_setup_ring64 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}
static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_32(ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring64 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/* HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_64.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_64.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_64(ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echoed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}
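
/* Choose the vdev a frame is transmitted on: off-channel frames use the
 * scan vdev, frames with a vif use that vif's vdev, and anything else falls
 * back to the monitor vdev (or vdev 0) when monitoring is active.
 */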
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}
static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}
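
/* Management frames take a simpler path than data: the frame is DMA-mapped
 * and described to the firmware with a dedicated HTT_H2T_MSG_TYPE_MGMT_TX
 * command sent over HTC instead of going through the data tx descriptors.
 */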
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}
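
/* The data tx path fills a pre-allocated ath10k_htt_txbuf (HTC header, HTT
 * command header and fragment list) and hands it together with the MSDU to
 * HIF as a two-element scatter-gather list, bypassing the HTC tx path; see
 * the comment inside the function for the rationale.
 */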
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		  struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	struct htt_msdu_ext_desc *ext_desc_t = NULL;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = &htt->txbuf.vaddr[msdu_id];
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_32;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
					&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}
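
/* 32-bit and 64-bit targets use different descriptor and message layouts,
 * so the address-width specific implementations are dispatched through an
 * ops table selected at probe time based on hw_params.target_64bit.
 */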
static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
};

void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->hw_params.target_64bit)
		htt->tx_ops = &htt_tx_ops_64;
	else
		htt->tx_ops = &htt_tx_ops_32;
}