/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"

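/*
 * Program the per-queue TXOK/TXDESC, TXERR/TXEOL and TXURN interrupt
 * masks into the secondary interrupt mask registers (AR_IMR_S0/S1/S2).
 */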
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
                                        struct ath9k_tx_queue_info *qi)
{
        ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
                  "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
                  ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
                  ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
                  ah->txurn_interrupt_mask);

        REG_WRITE(ah, AR_IMR_S0,
                  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
                  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
        REG_WRITE(ah, AR_IMR_S1,
                  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
                  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

        ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
        ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
        REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
        return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
        REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
        ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE,
                  "Enable TXE on queue: %u\n", q);
        REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
        u32 npend;

        npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
        if (npend == 0) {
                /* The queue is still considered busy while TXE is set. */
                if (REG_READ(ah, AR_Q_TXE) & (1 << q))
                        npend = 1;
        }

        return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);

/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is in the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Care must be taken to set the frame trigger level based on the DMA
 * request size. For example, if the DMA request size is 128 bytes, the
 * trigger level cannot exceed 6 * 64 = 384 bytes: there needs to be
 * enough free space in the TX FIFO for the requested transfer, so the
 * FIFO stops accepting data at 512 - 128 = 384 bytes. Setting the
 * threshold beyond 6 would hang the transmit path.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
        u32 txcfg, curLevel, newLevel;
        enum ath9k_int omask;

        if (ah->tx_trig_level >= ah->config.max_txtrig_level)
                return false;

        omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);

        txcfg = REG_READ(ah, AR_TXCFG);
        curLevel = MS(txcfg, AR_FTRIG);
        newLevel = curLevel;
        if (bIncTrigLevel) {
                if (curLevel < ah->config.max_txtrig_level)
                        newLevel++;
        } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
                newLevel--;
        if (newLevel != curLevel)
                REG_WRITE(ah, AR_TXCFG,
                          (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

        ath9k_hw_set_interrupts(ah, omask);

        ah->tx_trig_level = newLevel;

        return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);

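/*
 * ath9k_hw_updatetxtriglevel() usage note: callers in this file raise the
 * trigger level (bIncTrigLevel == true) whenever a TX completion reports a
 * FIFO, data or delimiter underrun; see ath9k_hw_txprocdesc() below.
 */
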
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT       4000    /* usec */
#define ATH9K_TIME_QUANTUM              100     /* usec */
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath9k_tx_queue_info *qi;
        u32 tsfLow, j, wait;
        u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;

        if (q >= pCap->total_queues) {
                ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
                          "invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
                          "inactive queue: %u\n", q);
                return false;
        }

        REG_WRITE(ah, AR_Q_TXD, 1 << q);

        for (wait = wait_time; wait != 0; wait--) {
                if (ath9k_hw_numtxpending(ah, q) == 0)
                        break;
                udelay(ATH9K_TIME_QUANTUM);
        }

        if (ath9k_hw_numtxpending(ah, q)) {
                ath_print(common, ATH_DBG_QUEUE,
                          "%s: Num of pending TX Frames %d on Q %d\n",
                          __func__, ath9k_hw_numtxpending(ah, q), q);

                /* Schedule a hardware quiet period to silence the channel. */
                for (j = 0; j < 2; j++) {
                        tsfLow = REG_READ(ah, AR_TSF_L32);
                        REG_WRITE(ah, AR_QUIET2,
                                  SM(10, AR_QUIET2_QUIET_DUR));
                        REG_WRITE(ah, AR_QUIET_PERIOD, 100);
                        REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
                        REG_SET_BIT(ah, AR_TIMER_MODE,
                                    AR_QUIET_TIMER_EN);

                        if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
                                break;

                        ath_print(common, ATH_DBG_QUEUE,
                                  "TSF has moved while trying to set "
                                  "quiet time TSF: 0x%08x\n", tsfLow);
                }

                REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

                /* Give the quiet timer time to take effect. */
                udelay(200);
                REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

                wait = wait_time;
                while (ath9k_hw_numtxpending(ah, q)) {
                        if ((--wait) == 0) {
                                ath_print(common, ATH_DBG_FATAL,
                                          "Failed to stop TX DMA in 100 "
                                          "msec after killing last frame\n");
                                break;
                        }
                        udelay(ATH9K_TIME_QUANTUM);
                }

                REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
        }

        REG_WRITE(ah, AR_Q_TXD, 0);
        return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);

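/*
 * Fill in the per-segment fields of a TX descriptor: the segment length,
 * the "more" bit used to chain multi-segment frames, and (for the last
 * segment) the control words copied from the first descriptor. All TX
 * status words are cleared so stale completions are never read back.
 */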
void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
                         u32 segLen, bool firstSeg,
                         bool lastSeg, const struct ath_desc *ds0)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        if (firstSeg) {
                ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
        } else if (lastSeg) {
                ads->ds_ctl0 = 0;
                ads->ds_ctl1 = segLen;
                ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
                ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
        } else {
                /* Intermediate segment of a multi-segment frame. */
                ads->ds_ctl0 = 0;
                ads->ds_ctl1 = segLen | AR_TxMore;
                ads->ds_ctl2 = 0;
                ads->ds_ctl3 = 0;
        }
        ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
        ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
        ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
        ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
        ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
EXPORT_SYMBOL(ath9k_hw_filltxdesc);

void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
        ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
        ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
        ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
        ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
EXPORT_SYMBOL(ath9k_hw_cleartxdesc);

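/*
 * Translate the raw TX status words of a completed descriptor into an
 * ath_tx_status. Returns -EINPROGRESS while the hardware still owns the
 * descriptor (AR_TxDone not yet set).
 */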
int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds,
                        struct ath_tx_status *ts)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        if ((ads->ds_txstatus9 & AR_TxDone) == 0)
                return -EINPROGRESS;

        ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
        ts->ts_tstamp = ads->AR_SendTimestamp;
        ts->ts_status = 0;
        ts->ts_flags = 0;

        if (ads->ds_txstatus1 & AR_FrmXmitOK)
                ts->ts_status |= ATH9K_TX_ACKED;
        if (ads->ds_txstatus1 & AR_ExcessiveRetries)
                ts->ts_status |= ATH9K_TXERR_XRETRY;
        if (ads->ds_txstatus1 & AR_Filtered)
                ts->ts_status |= ATH9K_TXERR_FILT;
        if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
                ts->ts_status |= ATH9K_TXERR_FIFO;
                ath9k_hw_updatetxtriglevel(ah, true);
        }
        if (ads->ds_txstatus9 & AR_TxOpExceeded)
                ts->ts_status |= ATH9K_TXERR_XTXOP;
        if (ads->ds_txstatus1 & AR_TxTimerExpired)
                ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;

        if (ads->ds_txstatus1 & AR_DescCfgErr)
                ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
        if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
                ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
                ath9k_hw_updatetxtriglevel(ah, true);
        }
        if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
                ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
                ath9k_hw_updatetxtriglevel(ah, true);
        }
        if (ads->ds_txstatus0 & AR_TxBaStatus) {
                ts->ts_flags |= ATH9K_TX_BA;
                ts->ba_low = ads->AR_BaBitmapLow;
                ts->ba_high = ads->AR_BaBitmapHigh;
        }

        ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
        switch (ts->ts_rateindex) {
        case 0:
                ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
                break;
        case 1:
                ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
                break;
        case 2:
                ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
                break;
        case 3:
                ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
                break;
        }

        ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
        ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
        ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
        ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
        ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
        ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
        ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
        ts->evm0 = ads->AR_TxEVM0;
        ts->evm1 = ads->AR_TxEVM1;
        ts->evm2 = ads->AR_TxEVM2;
        ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
        ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
        ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);

        return 0;
}
EXPORT_SYMBOL(ath9k_hw_txprocdesc);

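/*
 * Build the first two control words (and the encryption type) of a TX
 * descriptor from the packet length, TX power, key index/type and the
 * ATH9K_TXDESC_* flags.
 */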
void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
                            u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
                            u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        txPower += ah->txpower_indexoffset;
        if (txPower > 63)
                txPower = 63;

        ads->ds_ctl0 = (pktLen & AR_FrameLen)
                | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
                | SM(txPower, AR_XmitPower)
                | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
                | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
                | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
                | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

        ads->ds_ctl1 =
                (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
                | SM(type, AR_FrameType)
                | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
                | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
                | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

        ads->ds_ctl6 = SM(keyType, AR_EncrType);

        if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
                /* These chips carry extra control words; clear them. */
                ads->ds_ctl8 = 0;
                ads->ds_ctl9 = 0;
                ads->ds_ctl10 = 0;
                ads->ds_ctl11 = 0;
        }
}
EXPORT_SYMBOL(ath9k_hw_set11n_txdesc);

void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
                                  struct ath_desc *lastds,
                                  u32 durUpdateEn, u32 rtsctsRate,
                                  u32 rtsctsDuration,
                                  struct ath9k_11n_rate_series series[],
                                  u32 nseries, u32 flags)
{
        struct ar5416_desc *ads = AR5416DESC(ds);
        struct ar5416_desc *last_ads = AR5416DESC(lastds);
        u32 ds_ctl0;

        if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
                ds_ctl0 = ads->ds_ctl0;

                if (flags & ATH9K_TXDESC_RTSENA) {
                        ds_ctl0 &= ~AR_CTSEnable;
                        ds_ctl0 |= AR_RTSEnable;
                } else {
                        ds_ctl0 &= ~AR_RTSEnable;
                        ds_ctl0 |= AR_CTSEnable;
                }

                ads->ds_ctl0 = ds_ctl0;
        } else {
                ads->ds_ctl0 =
                        (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
        }

        ads->ds_ctl2 = set11nTries(series, 0)
                | set11nTries(series, 1)
                | set11nTries(series, 2)
                | set11nTries(series, 3)
                | (durUpdateEn ? AR_DurUpdateEna : 0)
                | SM(0, AR_BurstDur);

        ads->ds_ctl3 = set11nRate(series, 0)
                | set11nRate(series, 1)
                | set11nRate(series, 2)
                | set11nRate(series, 3);

        ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
                | set11nPktDurRTSCTS(series, 1);

        ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
                | set11nPktDurRTSCTS(series, 3);

        ads->ds_ctl7 = set11nRateFlags(series, 0)
                | set11nRateFlags(series, 1)
                | set11nRateFlags(series, 2)
                | set11nRateFlags(series, 3)
                | SM(rtsctsRate, AR_RTSCTSRate);
        last_ads->ds_ctl2 = ads->ds_ctl2;
        last_ads->ds_ctl3 = ads->ds_ctl3;
}
EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario);

void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
                                u32 aggrLen)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
        ads->ds_ctl6 &= ~AR_AggrLen;
        ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
}
EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first);

void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
                                 u32 numDelims)
{
        struct ar5416_desc *ads = AR5416DESC(ds);
        unsigned int ctl6;

        ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

        ctl6 = ads->ds_ctl6;
        ctl6 &= ~AR_PadDelim;
        ctl6 |= SM(numDelims, AR_PadDelim);
        ads->ds_ctl6 = ctl6;
}
EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle);

void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        ads->ds_ctl1 |= AR_IsAggr;
        ads->ds_ctl1 &= ~AR_MoreAggr;
        ads->ds_ctl6 &= ~AR_PadDelim;
}
EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last);

void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
}
EXPORT_SYMBOL(ath9k_hw_clr11n_aggr);

void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
                                   u32 burstDuration)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        ads->ds_ctl2 &= ~AR_BurstDur;
        ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
}
EXPORT_SYMBOL(ath9k_hw_set11n_burstduration);

void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
                                     u32 vmf)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        if (vmf)
                ads->ds_ctl0 |= AR_VirtMoreFrag;
        else
                ads->ds_ctl0 &= ~AR_VirtMoreFrag;
}

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
        *txqs &= ah->intr_txqs;
        ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);

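/*
 * Validate and copy the requested queue parameters into the driver's
 * shadow ath9k_tx_queue_info; the values are committed to the hardware
 * registers by ath9k_hw_resettxqueue().
 */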
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
                            const struct ath9k_tx_queue_info *qinfo)
{
        u32 cw;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath9k_tx_queue_info *qi;

        if (q >= pCap->total_queues) {
                ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
                          "invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
                          "inactive queue: %u\n", q);
                return false;
        }

        ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

        qi->tqi_ver = qinfo->tqi_ver;
        qi->tqi_subtype = qinfo->tqi_subtype;
        qi->tqi_qflags = qinfo->tqi_qflags;
        qi->tqi_priority = qinfo->tqi_priority;
        if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
                qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
        else
                qi->tqi_aifs = INIT_AIFS;
        if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
                cw = min(qinfo->tqi_cwmin, 1024U);
                qi->tqi_cwmin = 1;
                while (qi->tqi_cwmin < cw)
                        qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
        } else
                qi->tqi_cwmin = qinfo->tqi_cwmin;
        if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
                cw = min(qinfo->tqi_cwmax, 1024U);
                qi->tqi_cwmax = 1;
                while (qi->tqi_cwmax < cw)
                        qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
        } else
                qi->tqi_cwmax = INIT_CWMAX;

        if (qinfo->tqi_shretry != 0)
                qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
        else
                qi->tqi_shretry = INIT_SH_RETRY;
        if (qinfo->tqi_lgretry != 0)
                qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
        else
                qi->tqi_lgretry = INIT_LG_RETRY;
        qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
        qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
        qi->tqi_burstTime = qinfo->tqi_burstTime;
        qi->tqi_readyTime = qinfo->tqi_readyTime;

        switch (qinfo->tqi_subtype) {
        case ATH9K_WME_UPSD:
                if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
                        qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
                break;
        default:
                break;
        }

        return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
                            struct ath9k_tx_queue_info *qinfo)
{
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath9k_tx_queue_info *qi;

        if (q >= pCap->total_queues) {
                ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
                          "invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
                          "inactive queue: %u\n", q);
                return false;
        }

        qinfo->tqi_ver = qi->tqi_ver;
        qinfo->tqi_subtype = qi->tqi_subtype;
        qinfo->tqi_qflags = qi->tqi_qflags;
        qinfo->tqi_priority = qi->tqi_priority;
        qinfo->tqi_aifs = qi->tqi_aifs;
        qinfo->tqi_cwmin = qi->tqi_cwmin;
        qinfo->tqi_cwmax = qi->tqi_cwmax;
        qinfo->tqi_shretry = qi->tqi_shretry;
        qinfo->tqi_lgretry = qi->tqi_lgretry;
        qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
        qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
        qinfo->tqi_burstTime = qi->tqi_burstTime;
        qinfo->tqi_readyTime = qi->tqi_readyTime;

        return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
                          const struct ath9k_tx_queue_info *qinfo)
{
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        int q;

        switch (type) {
        case ATH9K_TX_QUEUE_BEACON:
                q = pCap->total_queues - 1;
                break;
        case ATH9K_TX_QUEUE_CAB:
                q = pCap->total_queues - 2;
                break;
        case ATH9K_TX_QUEUE_PSPOLL:
                q = 1;
                break;
        case ATH9K_TX_QUEUE_UAPSD:
                q = pCap->total_queues - 3;
                break;
        case ATH9K_TX_QUEUE_DATA:
                for (q = 0; q < pCap->total_queues; q++)
                        if (ah->txq[q].tqi_type ==
                            ATH9K_TX_QUEUE_INACTIVE)
                                break;
                if (q == pCap->total_queues) {
                        ath_print(common, ATH_DBG_FATAL,
                                  "No available TX queue\n");
                        return -1;
                }
                break;
        default:
                ath_print(common, ATH_DBG_FATAL,
                          "Invalid TX queue type: %u\n", type);
                return -1;
        }

        ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

        qi = &ah->txq[q];
        if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
                ath_print(common, ATH_DBG_FATAL,
                          "TX queue: %u already active\n", q);
                return -1;
        }
        memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
        qi->tqi_type = type;
        if (qinfo == NULL) {
                qi->tqi_qflags =
                        TXQ_FLAG_TXOKINT_ENABLE
                        | TXQ_FLAG_TXERRINT_ENABLE
                        | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
                qi->tqi_aifs = INIT_AIFS;
                qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
                qi->tqi_cwmax = INIT_CWMAX;
                qi->tqi_shretry = INIT_SH_RETRY;
                qi->tqi_lgretry = INIT_LG_RETRY;
                qi->tqi_physCompBuf = 0;
        } else {
                qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
                (void) ath9k_hw_set_txq_props(ah, q, qinfo);
        }

        return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;

        if (q >= pCap->total_queues) {
                ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
                          "invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
                          "inactive queue: %u\n", q);
                return false;
        }

        ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

        qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
        ah->txok_interrupt_mask &= ~(1 << q);
        ah->txerr_interrupt_mask &= ~(1 << q);
        ah->txdesc_interrupt_mask &= ~(1 << q);
        ah->txeol_interrupt_mask &= ~(1 << q);
        ah->txurn_interrupt_mask &= ~(1 << q);
        ath9k_hw_set_txq_interrupts(ah, qi);

        return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);

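/*
 * Write the shadow queue parameters (CWmin/CWmax, AIFS, retry limits,
 * CBR/ready-time settings and the per-queue-type quirks) into the QCU/DCU
 * registers and update the TX interrupt masks accordingly.
 */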
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_channel *chan = ah->curchan;
        struct ath9k_tx_queue_info *qi;
        u32 cwMin, chanCwMin, value;

        if (q >= pCap->total_queues) {
                ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
                          "invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
                          "inactive queue: %u\n", q);
                return false;
        }

        ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

        if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
                if (chan && IS_CHAN_B(chan))
                        chanCwMin = INIT_CWMIN_11B;
                else
                        chanCwMin = INIT_CWMIN;

                /* Round up to the nearest (2^n - 1). */
                for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
        } else
                cwMin = qi->tqi_cwmin;

        REG_WRITE(ah, AR_DLCL_IFS(q),
                  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
                  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
                  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

        REG_WRITE(ah, AR_DRETRY_LIMIT(q),
                  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
                  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
                  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

        REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
        REG_WRITE(ah, AR_DMISC(q),
                  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

        if (qi->tqi_cbrPeriod) {
                REG_WRITE(ah, AR_QCBRCFG(q),
                          SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
                          SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
                REG_WRITE(ah, AR_QMISC(q),
                          REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
                          (qi->tqi_cbrOverflowLimit ?
                           AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
        }
        if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
                REG_WRITE(ah, AR_QRDYTIMECFG(q),
                          SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
                          AR_Q_RDYTIMECFG_EN);
        }

        REG_WRITE(ah, AR_DCHNTIME(q),
                  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
                  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

        if (qi->tqi_burstTime
            && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
                REG_WRITE(ah, AR_QMISC(q),
                          REG_READ(ah, AR_QMISC(q)) |
                          AR_Q_MISC_RDYTIME_EXP_POLICY);
        }

        if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
                REG_WRITE(ah, AR_DMISC(q),
                          REG_READ(ah, AR_DMISC(q)) |
                          AR_D_MISC_POST_FR_BKOFF_DIS);
        }
        if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
                REG_WRITE(ah, AR_DMISC(q),
                          REG_READ(ah, AR_DMISC(q)) |
                          AR_D_MISC_FRAG_BKOFF_EN);
        }
        switch (qi->tqi_type) {
        case ATH9K_TX_QUEUE_BEACON:
                REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
                          | AR_Q_MISC_FSP_DBA_GATED
                          | AR_Q_MISC_BEACON_USE
                          | AR_Q_MISC_CBR_INCR_DIS1);

                REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
                          | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                             AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
                          | AR_D_MISC_BEACON_USE
                          | AR_D_MISC_POST_FR_BKOFF_DIS);
                break;
        case ATH9K_TX_QUEUE_CAB:
                REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
                          | AR_Q_MISC_FSP_DBA_GATED
                          | AR_Q_MISC_CBR_INCR_DIS1
                          | AR_Q_MISC_CBR_INCR_DIS0);
                value = (qi->tqi_readyTime -
                         (ah->config.sw_beacon_response_time -
                          ah->config.dma_beacon_response_time) -
                         ah->config.additional_swba_backoff) * 1024;
                REG_WRITE(ah, AR_QRDYTIMECFG(q),
                          value | AR_Q_RDYTIMECFG_EN);
                REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
                          | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                             AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
                break;
        case ATH9K_TX_QUEUE_PSPOLL:
                REG_WRITE(ah, AR_QMISC(q),
                          REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
                break;
        case ATH9K_TX_QUEUE_UAPSD:
                REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
                          AR_D_MISC_POST_FR_BKOFF_DIS);
                break;
        default:
                break;
        }

        if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
                REG_WRITE(ah, AR_DMISC(q),
                          REG_READ(ah, AR_DMISC(q)) |
                          SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
                             AR_D_MISC_ARB_LOCKOUT_CNTRL) |
                          AR_D_MISC_POST_FR_BKOFF_DIS);
        }

        if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
                ah->txok_interrupt_mask |= 1 << q;
        else
                ah->txok_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
                ah->txerr_interrupt_mask |= 1 << q;
        else
                ah->txerr_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
                ah->txdesc_interrupt_mask |= 1 << q;
        else
                ah->txdesc_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
                ah->txeol_interrupt_mask |= 1 << q;
        else
                ah->txeol_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
                ah->txurn_interrupt_mask |= 1 << q;
        else
                ah->txurn_interrupt_mask &= ~(1 << q);
        ath9k_hw_set_txq_interrupts(ah, qi);

        return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);

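/*
 * Decode the RX status words of a completed descriptor into ds_rxstat.
 * Returns -EINPROGRESS while AR_RxDone is still clear.
 */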
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
                        u32 pa, struct ath_desc *nds, u64 tsf)
{
        struct ar5416_desc ads;
        struct ar5416_desc *adsp = AR5416DESC(ds);
        u32 phyerr;

        if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
                return -EINPROGRESS;

        ads.u.rx = adsp->u.rx;

        ds->ds_rxstat.rs_status = 0;
        ds->ds_rxstat.rs_flags = 0;

        ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
        ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;

        if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
                ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD;
                ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD;
                ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD;
                ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD;
                ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD;
                ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD;
                ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD;
        } else {
                ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
                ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
                                                AR_RxRSSIAnt00);
                ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
                                                AR_RxRSSIAnt01);
                ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
                                                AR_RxRSSIAnt02);
                ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4,
                                                AR_RxRSSIAnt10);
                ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4,
                                                AR_RxRSSIAnt11);
                ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4,
                                                AR_RxRSSIAnt12);
        }
        if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
                ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
        else
                ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;

        ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
        ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

        ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
        ds->ds_rxstat.rs_moreaggr =
                (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
        ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
        ds->ds_rxstat.rs_flags =
                (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
        ds->ds_rxstat.rs_flags |=
                (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

        if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
                ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
        if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
                ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
        if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
                ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;

        if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
                if (ads.ds_rxstatus8 & AR_CRCErr)
                        ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
                else if (ads.ds_rxstatus8 & AR_PHYErr) {
                        ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
                        phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
                        ds->ds_rxstat.rs_phyerr = phyerr;
                } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
                        ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
                else if (ads.ds_rxstatus8 & AR_MichaelErr)
                        ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
        }

        return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);

void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
                          u32 size, u32 flags)
{
        struct ar5416_desc *ads = AR5416DESC(ds);
        struct ath9k_hw_capabilities *pCap = &ah->caps;

        ads->ds_ctl1 = size & AR_BufLen;
        if (flags & ATH9K_RXDESC_INTREQ)
                ads->ds_ctl1 |= AR_RxIntrReq;

        ads->ds_rxstatus8 &= ~AR_RxDone;
        if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
                memset(&(ads->u), 0, sizeof(ads->u));
}
EXPORT_SYMBOL(ath9k_hw_setuprxdesc);

/*
 * This can stop or re-enable RX.
 *
 * If 'set' is true, this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
        u32 reg;

        if (set) {
                REG_SET_BIT(ah, AR_DIAG_SW,
                            (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

                if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
                                   0, AH_WAIT_TIMEOUT)) {
                        REG_CLR_BIT(ah, AR_DIAG_SW,
                                    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

                        reg = REG_READ(ah, AR_OBS_BUS_1);
                        ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
                                  "RX failed to go idle in 10 ms RXSM=0x%x\n",
                                  reg);

                        return false;
                }
        } else {
                REG_CLR_BIT(ah, AR_DIAG_SW,
                            (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
        }

        return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
        REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_rxena(struct ath_hw *ah)
{
        REG_WRITE(ah, AR_CR, AR_CR_RXE);
}
EXPORT_SYMBOL(ath9k_hw_rxena);

void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
        ath9k_enable_mib_counters(ah);

        ath9k_ani_reset(ah);

        REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_stoppcurecv(struct ath_hw *ah)
{
        REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

        ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_stoppcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
#define AH_RX_TIME_QUANTUM     100     /* usec */
        struct ath_common *common = ath9k_hw_common(ah);
        int i;

        REG_WRITE(ah, AR_CR, AR_CR_RXD);

        /* Wait for rx enable bit to go low */
        for (i = AH_RX_STOP_DMA_TIMEOUT / AH_RX_TIME_QUANTUM; i != 0; i--) {
                if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
                        break;
                udelay(AH_RX_TIME_QUANTUM);
        }

        if (i == 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "DMA failed to stop in %d ms "
                          "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
                          AH_RX_STOP_DMA_TIMEOUT / 1000,
                          REG_READ(ah, AR_CR),
                          REG_READ(ah, AR_DIAG_SW));
                return false;
        }

        return true;

#undef AH_RX_TIME_QUANTUM
#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);

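/*
 * Allocate and initialize the dedicated beacon queue; interrupts for this
 * queue are intentionally left disabled.
 */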
int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
        struct ath9k_tx_queue_info qi;

        memset(&qi, 0, sizeof(qi));
        /* Beacons go out with minimal contention: AIFS of 1, zero CW. */
        qi.tqi_aifs = 1;
        qi.tqi_cwmin = 0;
        qi.tqi_cwmax = 0;
        /* NB: don't enable any interrupts */
        return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);