1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
6 * Purpose: driver entry for initial, open, close, tx and rx.
14 * vt6655_probe - module initial (insmod) driver entry
15 * vt6655_remove - module remove entry
16 * device_free_info - device structure resource free function
17 * device_print_info - print out resource
18 * device_rx_srv - rx service function
19 * device_alloc_rx_buf - rx buffer pre-allocated function
20 * device_free_rx_buf - free rx buffer function
21 * device_free_tx_buf - free tx buffer function
22 * device_init_rd0_ring - initial rd dma0 ring
23 * device_init_rd1_ring - initial rd dma1 ring
24 * device_init_td0_ring - initial tx dma0 ring buffer
25 * device_init_td1_ring - initial tx dma1 ring buffer
26 * device_init_registers - initial MAC & BBP & RF internal registers.
27 * device_init_rings - initial tx/rx ring buffer
28 * device_free_rings - free all allocated ring buffer
29 * device_tx_srv - tx interrupt service function
34 #include <linux/file.h>
44 #include <linux/delay.h>
45 #include <linux/kthread.h>
46 #include <linux/slab.h>
48 /*--------------------- Static Definitions -------------------------*/
50 * Define module options
52 MODULE_AUTHOR("VIA Networking Technologies, Inc., <lyndonchen@vntek.com.tw>");
53 MODULE_LICENSE("GPL");
54 MODULE_DESCRIPTION("VIA Networking Solomon-A/B/G Wireless LAN Adapter Driver");
56 #define DEVICE_PARAM(N, D)
58 #define RX_DESC_MIN0 16
59 #define RX_DESC_MAX0 128
60 #define RX_DESC_DEF0 32
61 DEVICE_PARAM(RxDescriptors0, "Number of receive descriptors0");
63 #define RX_DESC_MIN1 16
64 #define RX_DESC_MAX1 128
65 #define RX_DESC_DEF1 32
66 DEVICE_PARAM(RxDescriptors1, "Number of receive descriptors1");
68 #define TX_DESC_MIN0 16
69 #define TX_DESC_MAX0 128
70 #define TX_DESC_DEF0 32
71 DEVICE_PARAM(TxDescriptors0, "Number of transmit descriptors0");
73 #define TX_DESC_MIN1 16
74 #define TX_DESC_MAX1 128
75 #define TX_DESC_DEF1 64
76 DEVICE_PARAM(TxDescriptors1, "Number of transmit descriptors1");
78 #define INT_WORKS_DEF 20
79 #define INT_WORKS_MIN 10
80 #define INT_WORKS_MAX 64
82 DEVICE_PARAM(int_works, "Number of packets per interrupt services");
84 #define RTS_THRESH_DEF 2347
86 #define FRAG_THRESH_DEF 2346
88 #define SHORT_RETRY_MIN 0
89 #define SHORT_RETRY_MAX 31
90 #define SHORT_RETRY_DEF 8
92 DEVICE_PARAM(ShortRetryLimit, "Short frame retry limits");
94 #define LONG_RETRY_MIN 0
95 #define LONG_RETRY_MAX 15
96 #define LONG_RETRY_DEF 4
98 DEVICE_PARAM(LongRetryLimit, "long frame retry limits");
100 /* BasebandType[] baseband type selected
101 * 0: indicate 802.11a type
102 * 1: indicate 802.11b type
103 * 2: indicate 802.11g type
105 #define BBP_TYPE_MIN 0
106 #define BBP_TYPE_MAX 2
107 #define BBP_TYPE_DEF 2
109 DEVICE_PARAM(BasebandType, "baseband type");
112 * Static vars definitions
114 static const struct pci_device_id vt6655_pci_id_table[] = {
115 { PCI_VDEVICE(VIA, 0x3253) },
119 /*--------------------- Static Functions --------------------------*/
121 static int vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent);
122 static void device_free_info(struct vnt_private *priv);
123 static void device_print_info(struct vnt_private *priv);
125 static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr);
126 static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr);
128 static int device_init_rd0_ring(struct vnt_private *priv);
129 static int device_init_rd1_ring(struct vnt_private *priv);
130 static int device_init_td0_ring(struct vnt_private *priv);
131 static int device_init_td1_ring(struct vnt_private *priv);
133 static int device_rx_srv(struct vnt_private *priv, unsigned int idx);
134 static int device_tx_srv(struct vnt_private *priv, unsigned int idx);
135 static bool device_alloc_rx_buf(struct vnt_private *, struct vnt_rx_desc *);
136 static void device_free_rx_buf(struct vnt_private *priv,
137 struct vnt_rx_desc *rd);
138 static void device_init_registers(struct vnt_private *priv);
139 static void device_free_tx_buf(struct vnt_private *, struct vnt_tx_desc *);
140 static void device_free_td0_ring(struct vnt_private *priv);
141 static void device_free_td1_ring(struct vnt_private *priv);
142 static void device_free_rd0_ring(struct vnt_private *priv);
143 static void device_free_rd1_ring(struct vnt_private *priv);
144 static void device_free_rings(struct vnt_private *priv);
146 /*--------------------- Export Variables --------------------------*/
148 /*--------------------- Export Functions --------------------------*/
/* PCI remove callback: tear down all per-device state. */
static void vt6655_remove(struct pci_dev *pcid)
{
	struct vnt_private *priv = pci_get_drvdata(pcid);

	if (!priv)
		return;

	device_free_info(priv);
}
159 static void device_get_options(struct vnt_private *priv)
161 struct vnt_options *opts = &priv->opts;
163 opts->rx_descs0 = RX_DESC_DEF0;
164 opts->rx_descs1 = RX_DESC_DEF1;
165 opts->tx_descs[0] = TX_DESC_DEF0;
166 opts->tx_descs[1] = TX_DESC_DEF1;
167 opts->int_works = INT_WORKS_DEF;
169 opts->short_retry = SHORT_RETRY_DEF;
170 opts->long_retry = LONG_RETRY_DEF;
171 opts->bbp_type = BBP_TYPE_DEF;
175 device_set_options(struct vnt_private *priv)
177 priv->byShortRetryLimit = priv->opts.short_retry;
178 priv->byLongRetryLimit = priv->opts.long_retry;
179 priv->byBBType = priv->opts.bbp_type;
180 priv->byPacketType = priv->byBBType;
181 priv->byAutoFBCtrl = AUTO_FB_0;
182 priv->bUpdateBBVGA = true;
183 priv->preamble_type = 0;
185 pr_debug(" byShortRetryLimit= %d\n", (int)priv->byShortRetryLimit);
186 pr_debug(" byLongRetryLimit= %d\n", (int)priv->byLongRetryLimit);
187 pr_debug(" preamble_type= %d\n", (int)priv->preamble_type);
188 pr_debug(" byShortPreamble= %d\n", (int)priv->byShortPreamble);
189 pr_debug(" byBBType= %d\n", (int)priv->byBBType);
192 static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr)
194 iowrite8(1, iobase + MAC_REG_PAGE1SEL);
195 for (int i = 0; i < 6; i++)
196 iowrite8(mac_addr[i], iobase + MAC_REG_BSSID0 + i);
197 iowrite8(0, iobase + MAC_REG_PAGE1SEL);
200 static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr)
202 iowrite8(1, iobase + MAC_REG_PAGE1SEL);
203 for (int i = 0; i < 6; i++)
204 mac_addr[i] = ioread8(iobase + MAC_REG_PAR0 + i);
205 iowrite8(0, iobase + MAC_REG_PAGE1SEL);
209 * Initialisation of MAC & BBP registers
212 static void device_init_registers(struct vnt_private *priv)
216 unsigned char byValue;
217 unsigned char byCCKPwrdBm = 0;
218 unsigned char byOFDMPwrdBm = 0;
221 bb_software_reset(priv);
223 /* Do MACbSoftwareReset in MACvInitialize */
224 MACbSoftwareReset(priv);
228 /* Only used in 11g type, sync with ERP IE */
229 priv->bProtectMode = false;
231 priv->bNonERPPresent = false;
232 priv->bBarkerPreambleMd = false;
233 priv->wCurrentRate = RATE_1M;
234 priv->byTopOFDMBasicRate = RATE_24M;
235 priv->byTopCCKBasicRate = RATE_1M;
238 MACvInitialize(priv);
241 priv->local_id = ioread8(priv->port_offset + MAC_REG_LOCALID);
243 spin_lock_irqsave(&priv->lock, flags);
245 SROMvReadAllContents(priv->port_offset, priv->abyEEPROM);
247 spin_unlock_irqrestore(&priv->lock, flags);
249 /* Get Channel range */
250 priv->byMinChannel = 1;
251 priv->byMaxChannel = CB_MAX_CHANNEL;
254 byValue = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_ANTENNA);
255 if (byValue & EEP_ANTINV)
256 priv->bTxRxAntInv = true;
258 priv->bTxRxAntInv = false;
260 byValue &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
261 /* if not set default is All */
263 byValue = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
265 if (byValue == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
266 priv->byAntennaCount = 2;
267 priv->byTxAntennaMode = ANT_B;
268 priv->dwTxAntennaSel = 1;
269 priv->dwRxAntennaSel = 1;
271 if (priv->bTxRxAntInv)
272 priv->byRxAntennaMode = ANT_A;
274 priv->byRxAntennaMode = ANT_B;
276 priv->byAntennaCount = 1;
277 priv->dwTxAntennaSel = 0;
278 priv->dwRxAntennaSel = 0;
280 if (byValue & EEP_ANTENNA_AUX) {
281 priv->byTxAntennaMode = ANT_A;
283 if (priv->bTxRxAntInv)
284 priv->byRxAntennaMode = ANT_B;
286 priv->byRxAntennaMode = ANT_A;
288 priv->byTxAntennaMode = ANT_B;
290 if (priv->bTxRxAntInv)
291 priv->byRxAntennaMode = ANT_A;
293 priv->byRxAntennaMode = ANT_B;
297 /* Set initial antenna mode */
298 bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);
299 bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);
301 /* zonetype initial */
302 priv->byOriginalZonetype = priv->abyEEPROM[EEP_OFS_ZONETYPE];
304 if (!priv->bZoneRegExist)
305 priv->byZoneType = priv->abyEEPROM[EEP_OFS_ZONETYPE];
307 pr_debug("priv->byZoneType = %x\n", priv->byZoneType);
312 /* Get Desire Power Value */
313 priv->byCurPwr = 0xFF;
314 priv->byCCKPwr = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_PWR_CCK);
315 priv->byOFDMPwrG = SROMbyReadEmbedded(priv->port_offset,
318 /* Load power Table */
319 for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
320 priv->abyCCKPwrTbl[ii + 1] =
321 SROMbyReadEmbedded(priv->port_offset,
322 (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
323 if (priv->abyCCKPwrTbl[ii + 1] == 0)
324 priv->abyCCKPwrTbl[ii + 1] = priv->byCCKPwr;
326 priv->abyOFDMPwrTbl[ii + 1] =
327 SROMbyReadEmbedded(priv->port_offset,
328 (unsigned char)(ii + EEP_OFS_OFDM_PWR_TBL));
329 if (priv->abyOFDMPwrTbl[ii + 1] == 0)
330 priv->abyOFDMPwrTbl[ii + 1] = priv->byOFDMPwrG;
332 priv->abyCCKDefaultPwr[ii + 1] = byCCKPwrdBm;
333 priv->abyOFDMDefaultPwr[ii + 1] = byOFDMPwrdBm;
336 /* recover 12,13 ,14channel for EUROPE by 11 channel */
337 for (ii = 11; ii < 14; ii++) {
338 priv->abyCCKPwrTbl[ii] = priv->abyCCKPwrTbl[10];
339 priv->abyOFDMPwrTbl[ii] = priv->abyOFDMPwrTbl[10];
342 /* Load OFDM A Power Table */
343 for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
344 priv->abyOFDMPwrTbl[ii + CB_MAX_CHANNEL_24G + 1] =
345 SROMbyReadEmbedded(priv->port_offset,
346 (unsigned char)(ii + EEP_OFS_OFDMA_PWR_TBL));
348 priv->abyOFDMDefaultPwr[ii + CB_MAX_CHANNEL_24G + 1] =
349 SROMbyReadEmbedded(priv->port_offset,
350 (unsigned char)(ii + EEP_OFS_OFDMA_PWR_dBm));
353 if (priv->local_id > REV_ID_VT3253_B1) {
354 MACvSelectPage1(priv->port_offset);
356 iowrite8(MSRCTL1_TXPWR | MSRCTL1_CSAPAREN, priv->port_offset + MAC_REG_MSRCTL + 1);
358 MACvSelectPage0(priv->port_offset);
361 /* use relative tx timeout and 802.11i D4 */
362 vt6655_mac_word_reg_bits_on(priv->port_offset, MAC_REG_CFG,
363 (CFG_TKIPOPT | CFG_NOTXTIMEOUT));
365 /* set performance parameter by registry */
366 MACvSetShortRetryLimit(priv, priv->byShortRetryLimit);
367 MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);
369 /* reset TSF counter */
370 iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
371 /* enable TSF counter */
372 iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
374 /* initialize BBP registers */
375 bb_vt3253_init(priv);
377 if (priv->bUpdateBBVGA) {
378 priv->byBBVGACurrent = priv->abyBBVGA[0];
379 priv->byBBVGANew = priv->byBBVGACurrent;
380 bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
383 bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);
384 bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);
386 /* Set BB and packet type at the same time. */
387 /* Set Short Slot Time, xIFS, and RSPINF. */
388 priv->wCurrentRate = RATE_54M;
390 priv->radio_off = false;
392 priv->byRadioCtl = SROMbyReadEmbedded(priv->port_offset,
394 priv->hw_radio_off = false;
396 if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
398 priv->byGPIO = ioread8(priv->port_offset + MAC_REG_GPIOCTL1);
400 if (((priv->byGPIO & GPIO0_DATA) &&
401 !(priv->byRadioCtl & EEP_RADIOCTL_INV)) ||
402 (!(priv->byGPIO & GPIO0_DATA) &&
403 (priv->byRadioCtl & EEP_RADIOCTL_INV)))
404 priv->hw_radio_off = true;
407 if (priv->hw_radio_off || priv->bRadioControlOff)
408 CARDbRadioPowerOff(priv);
410 /* get Permanent network address */
411 SROMvReadEtherAddress(priv->port_offset, priv->abyCurrentNetAddr);
412 pr_debug("Network address = %pM\n", priv->abyCurrentNetAddr);
414 /* reset Tx pointer */
415 CARDvSafeResetRx(priv);
416 /* reset Rx pointer */
417 CARDvSafeResetTx(priv);
419 if (priv->local_id <= REV_ID_VT3253_A1)
420 vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_RCR, RCR_WPAERR);
423 MACvReceive0(priv->port_offset);
424 MACvReceive1(priv->port_offset);
426 /* start the adapter */
427 iowrite8(HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON, priv->port_offset + MAC_REG_HOSTCR);
430 static void device_print_info(struct vnt_private *priv)
432 dev_info(&priv->pcid->dev, "MAC=%pM IO=0x%lx Mem=0x%lx IRQ=%d\n",
433 priv->abyCurrentNetAddr, (unsigned long)priv->ioaddr,
434 (unsigned long)priv->port_offset, priv->pcid->irq);
437 static void device_free_info(struct vnt_private *priv)
443 ieee80211_unregister_hw(priv->hw);
445 if (priv->port_offset)
446 iounmap(priv->port_offset);
449 pci_release_regions(priv->pcid);
452 ieee80211_free_hw(priv->hw);
455 static bool device_init_rings(struct vnt_private *priv)
459 /*allocate all RD/TD rings a single pool*/
460 vir_pool = dma_alloc_coherent(&priv->pcid->dev,
461 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
462 priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
463 priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
464 priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
465 &priv->pool_dma, GFP_ATOMIC);
467 dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
471 priv->aRD0Ring = vir_pool;
472 priv->aRD1Ring = vir_pool +
473 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);
475 priv->rd0_pool_dma = priv->pool_dma;
476 priv->rd1_pool_dma = priv->rd0_pool_dma +
477 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);
479 priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev,
480 priv->opts.tx_descs[0] * PKT_BUF_SZ +
481 priv->opts.tx_descs[1] * PKT_BUF_SZ +
484 &priv->tx_bufs_dma0, GFP_ATOMIC);
485 if (!priv->tx0_bufs) {
486 dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");
488 dma_free_coherent(&priv->pcid->dev,
489 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
490 priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
491 priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
492 priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
493 vir_pool, priv->pool_dma);
497 priv->td0_pool_dma = priv->rd1_pool_dma +
498 priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc);
500 priv->td1_pool_dma = priv->td0_pool_dma +
501 priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);
503 /* vir_pool: pvoid type */
504 priv->apTD0Rings = vir_pool
505 + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
506 + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc);
508 priv->apTD1Rings = vir_pool
509 + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
510 + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc)
511 + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);
513 priv->tx1_bufs = priv->tx0_bufs +
514 priv->opts.tx_descs[0] * PKT_BUF_SZ;
516 priv->tx_beacon_bufs = priv->tx1_bufs +
517 priv->opts.tx_descs[1] * PKT_BUF_SZ;
519 priv->pbyTmpBuff = priv->tx_beacon_bufs +
522 priv->tx_bufs_dma1 = priv->tx_bufs_dma0 +
523 priv->opts.tx_descs[0] * PKT_BUF_SZ;
525 priv->tx_beacon_dma = priv->tx_bufs_dma1 +
526 priv->opts.tx_descs[1] * PKT_BUF_SZ;
531 static void device_free_rings(struct vnt_private *priv)
533 dma_free_coherent(&priv->pcid->dev,
534 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
535 priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
536 priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
537 priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
538 priv->aRD0Ring, priv->pool_dma);
541 dma_free_coherent(&priv->pcid->dev,
542 priv->opts.tx_descs[0] * PKT_BUF_SZ +
543 priv->opts.tx_descs[1] * PKT_BUF_SZ +
546 priv->tx0_bufs, priv->tx_bufs_dma0);
549 static int device_init_rd0_ring(struct vnt_private *priv)
552 dma_addr_t curr = priv->rd0_pool_dma;
553 struct vnt_rx_desc *desc;
556 /* Init the RD0 ring entries */
557 for (i = 0; i < priv->opts.rx_descs0;
558 i ++, curr += sizeof(struct vnt_rx_desc)) {
559 desc = &priv->aRD0Ring[i];
560 desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
561 if (!desc->rd_info) {
566 if (!device_alloc_rx_buf(priv, desc)) {
567 dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
572 desc->next = &priv->aRD0Ring[(i + 1) % priv->opts.rx_descs0];
573 desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
577 priv->aRD0Ring[i - 1].next_desc = cpu_to_le32(priv->rd0_pool_dma);
578 priv->pCurrRD[0] = &priv->aRD0Ring[0];
583 kfree(desc->rd_info);
587 desc = &priv->aRD0Ring[i];
588 device_free_rx_buf(priv, desc);
589 kfree(desc->rd_info);
595 static int device_init_rd1_ring(struct vnt_private *priv)
598 dma_addr_t curr = priv->rd1_pool_dma;
599 struct vnt_rx_desc *desc;
602 /* Init the RD1 ring entries */
603 for (i = 0; i < priv->opts.rx_descs1;
604 i ++, curr += sizeof(struct vnt_rx_desc)) {
605 desc = &priv->aRD1Ring[i];
606 desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
607 if (!desc->rd_info) {
612 if (!device_alloc_rx_buf(priv, desc)) {
613 dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
618 desc->next = &priv->aRD1Ring[(i + 1) % priv->opts.rx_descs1];
619 desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
623 priv->aRD1Ring[i - 1].next_desc = cpu_to_le32(priv->rd1_pool_dma);
624 priv->pCurrRD[1] = &priv->aRD1Ring[0];
629 kfree(desc->rd_info);
633 desc = &priv->aRD1Ring[i];
634 device_free_rx_buf(priv, desc);
635 kfree(desc->rd_info);
641 static void device_free_rd0_ring(struct vnt_private *priv)
645 for (i = 0; i < priv->opts.rx_descs0; i++) {
646 struct vnt_rx_desc *desc = &priv->aRD0Ring[i];
648 device_free_rx_buf(priv, desc);
649 kfree(desc->rd_info);
653 static void device_free_rd1_ring(struct vnt_private *priv)
657 for (i = 0; i < priv->opts.rx_descs1; i++) {
658 struct vnt_rx_desc *desc = &priv->aRD1Ring[i];
660 device_free_rx_buf(priv, desc);
661 kfree(desc->rd_info);
665 static int device_init_td0_ring(struct vnt_private *priv)
669 struct vnt_tx_desc *desc;
672 curr = priv->td0_pool_dma;
673 for (i = 0; i < priv->opts.tx_descs[0];
674 i++, curr += sizeof(struct vnt_tx_desc)) {
675 desc = &priv->apTD0Rings[i];
676 desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
677 if (!desc->td_info) {
682 desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
683 desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;
685 desc->next = &(priv->apTD0Rings[(i + 1) % priv->opts.tx_descs[0]]);
686 desc->next_desc = cpu_to_le32(curr +
687 sizeof(struct vnt_tx_desc));
691 priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
692 priv->apTailTD[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
698 desc = &priv->apTD0Rings[i];
699 kfree(desc->td_info);
705 static int device_init_td1_ring(struct vnt_private *priv)
709 struct vnt_tx_desc *desc;
712 /* Init the TD ring entries */
713 curr = priv->td1_pool_dma;
714 for (i = 0; i < priv->opts.tx_descs[1];
715 i++, curr += sizeof(struct vnt_tx_desc)) {
716 desc = &priv->apTD1Rings[i];
717 desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
718 if (!desc->td_info) {
723 desc->td_info->buf = priv->tx1_bufs + i * PKT_BUF_SZ;
724 desc->td_info->buf_dma = priv->tx_bufs_dma1 + i * PKT_BUF_SZ;
726 desc->next = &(priv->apTD1Rings[(i + 1) % priv->opts.tx_descs[1]]);
727 desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
731 priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
732 priv->apTailTD[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
738 desc = &priv->apTD1Rings[i];
739 kfree(desc->td_info);
745 static void device_free_td0_ring(struct vnt_private *priv)
749 for (i = 0; i < priv->opts.tx_descs[0]; i++) {
750 struct vnt_tx_desc *desc = &priv->apTD0Rings[i];
751 struct vnt_td_info *td_info = desc->td_info;
753 dev_kfree_skb(td_info->skb);
754 kfree(desc->td_info);
758 static void device_free_td1_ring(struct vnt_private *priv)
762 for (i = 0; i < priv->opts.tx_descs[1]; i++) {
763 struct vnt_tx_desc *desc = &priv->apTD1Rings[i];
764 struct vnt_td_info *td_info = desc->td_info;
766 dev_kfree_skb(td_info->skb);
767 kfree(desc->td_info);
771 /*-----------------------------------------------------------------*/
773 static int device_rx_srv(struct vnt_private *priv, unsigned int idx)
775 struct vnt_rx_desc *rd;
778 for (rd = priv->pCurrRD[idx];
779 rd->rd0.owner == OWNED_BY_HOST;
784 if (!rd->rd_info->skb)
787 if (vnt_receive_frame(priv, rd)) {
788 if (!device_alloc_rx_buf(priv, rd)) {
789 dev_err(&priv->pcid->dev,
790 "can not allocate rx buf\n");
794 rd->rd0.owner = OWNED_BY_NIC;
797 priv->pCurrRD[idx] = rd;
802 static bool device_alloc_rx_buf(struct vnt_private *priv,
803 struct vnt_rx_desc *rd)
805 struct vnt_rd_info *rd_info = rd->rd_info;
807 rd_info->skb = dev_alloc_skb((int)priv->rx_buf_sz);
812 dma_map_single(&priv->pcid->dev,
813 skb_put(rd_info->skb, skb_tailroom(rd_info->skb)),
814 priv->rx_buf_sz, DMA_FROM_DEVICE);
815 if (dma_mapping_error(&priv->pcid->dev, rd_info->skb_dma)) {
816 dev_kfree_skb(rd_info->skb);
821 *((unsigned int *)&rd->rd0) = 0; /* FIX cast */
823 rd->rd0.res_count = cpu_to_le16(priv->rx_buf_sz);
824 rd->rd0.owner = OWNED_BY_NIC;
825 rd->rd1.req_count = cpu_to_le16(priv->rx_buf_sz);
826 rd->buff_addr = cpu_to_le32(rd_info->skb_dma);
831 static void device_free_rx_buf(struct vnt_private *priv,
832 struct vnt_rx_desc *rd)
834 struct vnt_rd_info *rd_info = rd->rd_info;
836 dma_unmap_single(&priv->pcid->dev, rd_info->skb_dma,
837 priv->rx_buf_sz, DMA_FROM_DEVICE);
838 dev_kfree_skb(rd_info->skb);
841 static const u8 fallback_rate0[5][5] = {
842 {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
843 {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
844 {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
845 {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
846 {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
849 static const u8 fallback_rate1[5][5] = {
850 {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
851 {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
852 {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
853 {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
854 {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
857 static int vnt_int_report_rate(struct vnt_private *priv,
858 struct vnt_td_info *context, u8 tsr0, u8 tsr1)
860 struct vnt_tx_fifo_head *fifo_head;
861 struct ieee80211_tx_info *info;
862 struct ieee80211_rate *rate;
864 u8 tx_retry = (tsr0 & TSR0_NCR);
873 fifo_head = (struct vnt_tx_fifo_head *)context->buf;
874 fb_option = (le16_to_cpu(fifo_head->fifo_ctl) &
875 (FIFOCTL_AUTO_FB_0 | FIFOCTL_AUTO_FB_1));
877 info = IEEE80211_SKB_CB(context->skb);
878 idx = info->control.rates[0].idx;
880 if (fb_option && !(tsr1 & TSR1_TERR)) {
884 rate = ieee80211_get_tx_rate(priv->hw, info);
885 tx_rate = rate->hw_value - RATE_18M;
890 if (fb_option & FIFOCTL_AUTO_FB_0)
891 tx_rate = fallback_rate0[tx_rate][retry];
892 else if (fb_option & FIFOCTL_AUTO_FB_1)
893 tx_rate = fallback_rate1[tx_rate][retry];
895 if (info->band == NL80211_BAND_5GHZ)
896 idx = tx_rate - RATE_6M;
901 ieee80211_tx_info_clear_status(info);
903 info->status.rates[0].count = tx_retry;
905 if (!(tsr1 & TSR1_TERR)) {
906 info->status.rates[0].idx = idx;
908 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
909 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
911 info->flags |= IEEE80211_TX_STAT_ACK;
917 static int device_tx_srv(struct vnt_private *priv, unsigned int idx)
919 struct vnt_tx_desc *desc;
921 unsigned char byTsr0;
922 unsigned char byTsr1;
924 for (desc = priv->apTailTD[idx]; priv->iTDUsed[idx] > 0; desc = desc->next) {
925 if (desc->td0.owner == OWNED_BY_NIC)
930 byTsr0 = desc->td0.tsr0;
931 byTsr1 = desc->td0.tsr1;
933 /* Only the status of first TD in the chain is correct */
934 if (desc->td1.tcr & TCR_STP) {
935 if ((desc->td_info->flags & TD_FLAGS_NETIF_SKB) != 0) {
936 if (!(byTsr1 & TSR1_TERR)) {
938 pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
943 pr_debug(" Tx[%d] dropped & tsr1[%02X] tsr0[%02X]\n",
944 (int)idx, byTsr1, byTsr0);
948 if (byTsr1 & TSR1_TERR) {
949 if ((desc->td_info->flags & TD_FLAGS_PRIV_SKB) != 0) {
950 pr_debug(" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X]\n",
951 (int)idx, byTsr1, byTsr0);
955 vnt_int_report_rate(priv, desc->td_info, byTsr0, byTsr1);
957 device_free_tx_buf(priv, desc);
958 priv->iTDUsed[idx]--;
962 priv->apTailTD[idx] = desc;
967 static void device_error(struct vnt_private *priv, unsigned short status)
969 if (status & ISR_FETALERR) {
970 dev_err(&priv->pcid->dev, "Hardware fatal error\n");
977 static void device_free_tx_buf(struct vnt_private *priv,
978 struct vnt_tx_desc *desc)
980 struct vnt_td_info *td_info = desc->td_info;
981 struct sk_buff *skb = td_info->skb;
984 ieee80211_tx_status_irqsafe(priv->hw, skb);
990 static void vnt_check_bb_vga(struct vnt_private *priv)
995 if (!priv->bUpdateBBVGA)
998 if (priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
1001 if (!(priv->vif->cfg.assoc && priv->current_rssi))
1004 RFvRSSITodBm(priv, (u8)priv->current_rssi, &dbm);
1006 for (i = 0; i < BB_VGA_LEVEL; i++) {
1007 if (dbm < priv->dbm_threshold[i]) {
1008 priv->byBBVGANew = priv->abyBBVGA[i];
1013 if (priv->byBBVGANew == priv->byBBVGACurrent) {
1014 priv->uBBVGADiffCount = 1;
1018 priv->uBBVGADiffCount++;
1020 if (priv->uBBVGADiffCount == 1) {
1021 /* first VGA diff gain */
1022 bb_set_vga_gain_offset(priv, priv->byBBVGANew);
1024 dev_dbg(&priv->pcid->dev,
1025 "First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
1026 (int)dbm, priv->byBBVGANew,
1027 priv->byBBVGACurrent,
1028 (int)priv->uBBVGADiffCount);
1031 if (priv->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD) {
1032 dev_dbg(&priv->pcid->dev,
1033 "RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
1034 (int)dbm, priv->byBBVGANew,
1035 priv->byBBVGACurrent,
1036 (int)priv->uBBVGADiffCount);
1038 bb_set_vga_gain_offset(priv, priv->byBBVGANew);
1042 static void vnt_interrupt_process(struct vnt_private *priv)
1044 struct ieee80211_low_level_stats *low_stats = &priv->low_stats;
1048 unsigned long flags;
1050 isr = ioread32(priv->port_offset + MAC_REG_ISR);
1055 if (isr == 0xffffffff) {
1056 pr_debug("isr = 0xffff\n");
1060 spin_lock_irqsave(&priv->lock, flags);
1062 /* Read low level stats */
1063 mib_counter = ioread32(priv->port_offset + MAC_REG_MIBCNTR);
1065 low_stats->dot11RTSSuccessCount += mib_counter & 0xff;
1066 low_stats->dot11RTSFailureCount += (mib_counter >> 8) & 0xff;
1067 low_stats->dot11ACKFailureCount += (mib_counter >> 16) & 0xff;
1068 low_stats->dot11FCSErrorCount += (mib_counter >> 24) & 0xff;
1072 * Must do this after doing rx/tx, cause ISR bit is slow
1073 * than RD/TD write back
1074 * update ISR counter
1076 while (isr && priv->vif) {
1077 iowrite32(isr, priv->port_offset + MAC_REG_ISR);
1079 if (isr & ISR_FETALERR) {
1080 pr_debug(" ISR_FETALERR\n");
1081 iowrite8(0, priv->port_offset + MAC_REG_SOFTPWRCTL);
1082 iowrite16(SOFTPWRCTL_SWPECTI, priv->port_offset + MAC_REG_SOFTPWRCTL);
1083 device_error(priv, isr);
1086 if (isr & ISR_TBTT) {
1087 if (priv->op_mode != NL80211_IFTYPE_ADHOC)
1088 vnt_check_bb_vga(priv);
1090 priv->bBeaconSent = false;
1091 if (priv->bEnablePSMode)
1092 PSbIsNextTBTTWakeUp((void *)priv);
1094 if ((priv->op_mode == NL80211_IFTYPE_AP ||
1095 priv->op_mode == NL80211_IFTYPE_ADHOC) &&
1096 priv->vif->bss_conf.enable_beacon)
1097 MACvOneShotTimer1MicroSec(priv,
1098 (priv->vif->bss_conf.beacon_int -
1099 MAKE_BEACON_RESERVED) << 10);
1101 /* TODO: adhoc PS mode */
1104 if (isr & ISR_BNTX) {
1105 if (priv->op_mode == NL80211_IFTYPE_ADHOC) {
1106 priv->bIsBeaconBufReadySet = false;
1107 priv->cbBeaconBufReadySetCnt = 0;
1110 priv->bBeaconSent = true;
1113 if (isr & ISR_RXDMA0)
1114 max_count += device_rx_srv(priv, TYPE_RXDMA0);
1116 if (isr & ISR_RXDMA1)
1117 max_count += device_rx_srv(priv, TYPE_RXDMA1);
1119 if (isr & ISR_TXDMA0)
1120 max_count += device_tx_srv(priv, TYPE_TXDMA0);
1122 if (isr & ISR_AC0DMA)
1123 max_count += device_tx_srv(priv, TYPE_AC0DMA);
1125 if (isr & ISR_SOFTTIMER1) {
1126 if (priv->vif->bss_conf.enable_beacon)
1127 vnt_beacon_make(priv, priv->vif);
1130 /* If both buffers available wake the queue */
1131 if (AVAIL_TD(priv, TYPE_TXDMA0) &&
1132 AVAIL_TD(priv, TYPE_AC0DMA) &&
1133 ieee80211_queue_stopped(priv->hw, 0))
1134 ieee80211_wake_queues(priv->hw);
1136 isr = ioread32(priv->port_offset + MAC_REG_ISR);
1138 MACvReceive0(priv->port_offset);
1139 MACvReceive1(priv->port_offset);
1141 if (max_count > priv->opts.int_works)
1145 spin_unlock_irqrestore(&priv->lock, flags);
1148 static void vnt_interrupt_work(struct work_struct *work)
1150 struct vnt_private *priv =
1151 container_of(work, struct vnt_private, interrupt_work);
1154 vnt_interrupt_process(priv);
1156 iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR);
1159 static irqreturn_t vnt_interrupt(int irq, void *arg)
1161 struct vnt_private *priv = arg;
1163 schedule_work(&priv->interrupt_work);
1165 iowrite32(0, priv->port_offset + MAC_REG_IMR);
1170 static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
1172 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1173 struct vnt_tx_desc *head_td;
1175 unsigned long flags;
1177 spin_lock_irqsave(&priv->lock, flags);
1179 if (ieee80211_is_data(hdr->frame_control))
1180 dma_idx = TYPE_AC0DMA;
1182 dma_idx = TYPE_TXDMA0;
1184 if (AVAIL_TD(priv, dma_idx) < 1) {
1185 spin_unlock_irqrestore(&priv->lock, flags);
1186 ieee80211_stop_queues(priv->hw);
1190 head_td = priv->apCurrTD[dma_idx];
1192 head_td->td1.tcr = 0;
1194 head_td->td_info->skb = skb;
1196 if (dma_idx == TYPE_AC0DMA)
1197 head_td->td_info->flags = TD_FLAGS_NETIF_SKB;
1199 priv->apCurrTD[dma_idx] = head_td->next;
1201 spin_unlock_irqrestore(&priv->lock, flags);
1203 vnt_generate_fifo_header(priv, dma_idx, head_td, skb);
1205 spin_lock_irqsave(&priv->lock, flags);
1207 priv->bPWBitOn = false;
1209 /* Set TSR1 & ReqCount in TxDescHead */
1210 head_td->td1.tcr |= (TCR_STP | TCR_EDP | EDMSDU);
1211 head_td->td1.req_count = cpu_to_le16(head_td->td_info->req_count);
1213 head_td->buff_addr = cpu_to_le32(head_td->td_info->buf_dma);
1215 /* Poll Transmit the adapter */
1217 head_td->td0.owner = OWNED_BY_NIC;
1218 wmb(); /* second memory barrier */
1220 if (head_td->td_info->flags & TD_FLAGS_NETIF_SKB)
1221 MACvTransmitAC0(priv->port_offset);
1223 MACvTransmit0(priv->port_offset);
1225 priv->iTDUsed[dma_idx]++;
1227 spin_unlock_irqrestore(&priv->lock, flags);
1232 static void vnt_tx_80211(struct ieee80211_hw *hw,
1233 struct ieee80211_tx_control *control,
1234 struct sk_buff *skb)
1236 struct vnt_private *priv = hw->priv;
1238 if (vnt_tx_packet(priv, skb))
1239 ieee80211_free_txskb(hw, skb);
1242 static int vnt_start(struct ieee80211_hw *hw)
1244 struct vnt_private *priv = hw->priv;
1247 priv->rx_buf_sz = PKT_BUF_SZ;
1248 if (!device_init_rings(priv))
1251 ret = request_irq(priv->pcid->irq, vnt_interrupt,
1252 IRQF_SHARED, "vt6655", priv);
1254 dev_dbg(&priv->pcid->dev, "failed to start irq\n");
1255 goto err_free_rings;
1258 dev_dbg(&priv->pcid->dev, "call device init rd0 ring\n");
1259 ret = device_init_rd0_ring(priv);
1262 ret = device_init_rd1_ring(priv);
1264 goto err_free_rd0_ring;
1265 ret = device_init_td0_ring(priv);
1267 goto err_free_rd1_ring;
1268 ret = device_init_td1_ring(priv);
1270 goto err_free_td0_ring;
1272 device_init_registers(priv);
1274 dev_dbg(&priv->pcid->dev, "enable MAC interrupt\n");
1275 iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR);
1277 ieee80211_wake_queues(hw);
1282 device_free_td0_ring(priv);
1284 device_free_rd1_ring(priv);
1286 device_free_rd0_ring(priv);
1288 free_irq(priv->pcid->irq, priv);
1290 device_free_rings(priv);
1294 static void vnt_stop(struct ieee80211_hw *hw)
1296 struct vnt_private *priv = hw->priv;
1298 ieee80211_stop_queues(hw);
1300 cancel_work_sync(&priv->interrupt_work);
1303 MACbSoftwareReset(priv);
1304 CARDbRadioPowerOff(priv);
1306 device_free_td0_ring(priv);
1307 device_free_td1_ring(priv);
1308 device_free_rd0_ring(priv);
1309 device_free_rd1_ring(priv);
1310 device_free_rings(priv);
1312 free_irq(priv->pcid->irq, priv);
/*
 * vnt_add_interface - mac80211 .add_interface callback.
 *
 * Adjusts the hardware receive/host-control bits for the new interface
 * type (ad-hoc and AP both drop RCR_UNICAST and set their HOSTCR mode
 * bit) and records the operating mode in priv->op_mode.
 *
 * NOTE(review): the switch's break statements, default case and return
 * value are elided from this extract -- verify in the full file.
 */
1315 static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1317 	struct vnt_private *priv = hw->priv;
1321 	switch (vif->type) {
1322 	case NL80211_IFTYPE_STATION:
1324 	case NL80211_IFTYPE_ADHOC:
1325 		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);
1327 		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
1330 	case NL80211_IFTYPE_AP:
1331 		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);
1333 		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
1340 	priv->op_mode = vif->type;
/*
 * vnt_remove_interface - mac80211 .remove_interface callback.
 *
 * Undoes the per-mode setup: stops automatic beacon transmission
 * (TCR_AUTOBCNTX), disables the TSF counter (TFTCTL_TSFCNTREN) and
 * clears the ad-hoc/AP mode bit in HOSTCR, then resets priv->op_mode.
 *
 * NOTE(review): break statements and the default case of the switch are
 * elided from this extract.
 */
1345 static void vnt_remove_interface(struct ieee80211_hw *hw,
1346 struct ieee80211_vif *vif)
1348 	struct vnt_private *priv = hw->priv;
1350 	switch (vif->type) {
1351 	case NL80211_IFTYPE_STATION:
1353 	case NL80211_IFTYPE_ADHOC:
1354 		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
1355 		vt6655_mac_reg_bits_off(priv->port_offset,
1356 MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
1357 		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
1359 	case NL80211_IFTYPE_AP:
1360 		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
1361 		vt6655_mac_reg_bits_off(priv->port_offset,
1362 MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
1363 		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
1369 	priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
/*
 * vnt_config - mac80211 .config callback.
 *
 * Handles three change classes: power save on/off, channel changes
 * (picking the 11a baseband type for 5 GHz, otherwise 11g, and
 * reprogramming PHY parameters when the type changes) and TX power
 * updates for the current channel.
 *
 * NOTE(review): else branches and the return statement are elided from
 * this extract.
 */
1372 static int vnt_config(struct ieee80211_hw *hw, u32 changed)
1374 	struct vnt_private *priv = hw->priv;
1375 	struct ieee80211_conf *conf = &hw->conf;
1378 	if (changed & IEEE80211_CONF_CHANGE_PS) {
1379 		if (conf->flags & IEEE80211_CONF_PS)
/* listen interval bounds how long the device may doze */
1380 			PSvEnablePowerSaving(priv, conf->listen_interval);
1382 			PSvDisablePowerSaving(priv);
/* off-channel work also forces a (re)tune to the configured channel */
1385 	if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
1386 (conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
1387 		set_channel(priv, conf->chandef.chan);
1389 		if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
1390 			bb_type = BB_TYPE_11A;
1392 			bb_type = BB_TYPE_11G;
/* only touch the PHY when the baseband type actually changed */
1394 		if (priv->byBBType != bb_type) {
1395 			priv->byBBType = bb_type;
1397 			CARDbSetPhyParameter(priv, priv->byBBType);
1401 	if (changed & IEEE80211_CONF_CHANGE_POWER) {
1402 		if (priv->byBBType == BB_TYPE_11B)
1403 			priv->wCurrentRate = RATE_1M;
1405 			priv->wCurrentRate = RATE_54M;
1407 		RFbSetPower(priv, priv->wCurrentRate,
1408 conf->chandef.chan->hw_value);
/*
 * vnt_bss_info_changed - mac80211 .bss_info_changed callback.
 *
 * Pushes BSS state into the hardware: BSSID, basic rates, ERP preamble/
 * CTS-protection/slot-time settings, TX power, beaconing on/off and --
 * for non-AP modes -- TSF/beacon-interval synchronisation on
 * association changes.
 *
 * NOTE(review): closing braces, else keywords and some continuation
 * lines are elided from this extract; comments describe the visible
 * calls only.
 */
1414 static void vnt_bss_info_changed(struct ieee80211_hw *hw,
1415 struct ieee80211_vif *vif,
1416 struct ieee80211_bss_conf *conf, u64 changed)
1418 	struct vnt_private *priv = hw->priv;
1420 	priv->current_aid = vif->cfg.aid;
/* BSSID write is done under the driver lock */
1422 	if (changed & BSS_CHANGED_BSSID && conf->bssid) {
1423 		unsigned long flags;
1425 		spin_lock_irqsave(&priv->lock, flags);
1427 		vt6655_mac_write_bssid_addr(priv->port_offset, conf->bssid);
1429 		spin_unlock_irqrestore(&priv->lock, flags);
1432 	if (changed & BSS_CHANGED_BASIC_RATES) {
1433 		priv->basic_rates = conf->basic_rates;
1435 		CARDvUpdateBasicTopRate(priv);
1437 		dev_dbg(&priv->pcid->dev,
1438 "basic rates %x\n", conf->basic_rates);
/* short vs. long (Barker) preamble */
1441 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
1442 		if (conf->use_short_preamble) {
1443 			MACvEnableBarkerPreambleMd(priv->port_offset);
1444 			priv->preamble_type = true;
1446 			MACvDisableBarkerPreambleMd(priv->port_offset);
1447 			priv->preamble_type = false;
1451 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
1452 		if (conf->use_cts_prot)
1453 			MACvEnableProtectMD(priv->port_offset);
1455 			MACvDisableProtectMD(priv->port_offset);
/* slot time change requires re-deriving PHY parameters and VGA gain */
1458 	if (changed & BSS_CHANGED_ERP_SLOT) {
1459 		if (conf->use_short_slot)
1460 			priv->short_slot_time = true;
1462 			priv->short_slot_time = false;
1464 		CARDbSetPhyParameter(priv, priv->byBBType);
1465 		bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
1468 	if (changed & BSS_CHANGED_TXPOWER)
1469 		RFbSetPower(priv, priv->wCurrentRate,
1470 conf->chandef.chan->hw_value);
1472 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
1473 		dev_dbg(&priv->pcid->dev,
1474 "Beacon enable %d\n", conf->enable_beacon);
1476 		if (conf->enable_beacon) {
1477 			vnt_beacon_enable(priv, vif, conf);
/* let the MAC transmit beacons automatically */
1479 			vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
1481 			vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR,
/* sync local TSF/TBTT to the BSS when associating (non-AP only) */
1486 	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
1487 priv->op_mode != NL80211_IFTYPE_AP) {
1488 		if (vif->cfg.assoc && conf->beacon_rate) {
1489 			CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
1492 			CARDbSetBeaconPeriod(priv, conf->beacon_int);
1494 			CARDvSetFirstNextTBTT(priv, conf->beacon_int);
/* otherwise reset and (re)enable the TSF counter */
1496 			iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
1497 			iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
/*
 * vnt_prepare_multicast - mac80211 .prepare_multicast callback.
 *
 * Builds a 64-bit multicast hash from the address list: the top 6 bits
 * of each address's CRC-32 (>> 26) select one of 64 filter bits.  The
 * list length is cached in priv->mc_list_count for vnt_configure().
 *
 * NOTE(review): the local declarations and the return of the computed
 * filter value are elided from this extract.
 */
1502 static u64 vnt_prepare_multicast(struct ieee80211_hw *hw,
1503 struct netdev_hw_addr_list *mc_list)
1505 	struct vnt_private *priv = hw->priv;
1506 	struct netdev_hw_addr *ha;
1510 	netdev_hw_addr_list_for_each(ha, mc_list) {
1511 		bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1513 		mc_filter |= 1ULL << (bit_nr & 0x3f);
1516 	priv->mc_list_count = mc_list->count;
/*
 * vnt_configure - mac80211 .configure_filter callback.
 *
 * Translates the mac80211 filter flags into the RCR receive-control
 * register and the MAR0/MAR0+4 multicast hash registers (which live on
 * MAC register page 1).  Only FIF_ALLMULTI, FIF_OTHER_BSS and
 * FIF_BCN_PRBRESP_PROMISC are supported, as masked into *total_flags.
 *
 * NOTE(review): else branches and closing braces are elided from this
 * extract.
 */
1521 static void vnt_configure(struct ieee80211_hw *hw,
1522 unsigned int changed_flags,
1523 unsigned int *total_flags, u64 multicast)
1525 	struct vnt_private *priv = hw->priv;
/* advertise only the filter bits this hardware can honour */
1528 	*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
/* read-modify-write the receive control register */
1530 	rx_mode = ioread8(priv->port_offset + MAC_REG_RCR);
1532 	dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);
1534 	if (changed_flags & FIF_ALLMULTI) {
1535 		if (*total_flags & FIF_ALLMULTI) {
1536 			unsigned long flags;
1538 			spin_lock_irqsave(&priv->lock, flags);
/* >2 entries: open the hash filter completely (all-ones) */
1540 			if (priv->mc_list_count > 2) {
1541 				MACvSelectPage1(priv->port_offset);
1543 				iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0);
1544 				iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0 + 4);
1546 				MACvSelectPage0(priv->port_offset);
1548 				MACvSelectPage1(priv->port_offset);
/* NOTE(review): le64_to_cpu() on a host-order u64 looks suspicious on
 * big-endian -- confirm the intended byte order of 'multicast' */
1550 				multicast = le64_to_cpu(multicast);
1551 				iowrite32((u32)multicast, priv->port_offset + MAC_REG_MAR0);
1552 				iowrite32((u32)(multicast >> 32),
1553 priv->port_offset + MAC_REG_MAR0 + 4);
1555 				MACvSelectPage0(priv->port_offset);
1558 			spin_unlock_irqrestore(&priv->lock, flags);
1560 			rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
1562 			rx_mode &= ~(RCR_MULTICAST | RCR_BROADCAST);
/* promiscuous-ish modes: accept mcast/bcast and drop BSSID matching */
1566 	if (changed_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)) {
1567 		rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
1569 		if (*total_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC))
1570 			rx_mode &= ~RCR_BSSID;
1572 			rx_mode |= RCR_BSSID;
1575 	iowrite8(rx_mode, priv->port_offset + MAC_REG_RCR);
1577 	dev_dbg(&priv->pcid->dev, "rx mode out= %x\n", rx_mode);
/*
 * vnt_set_key - mac80211 .set_key callback.
 *
 * Installs a key via vnt_set_keys() and releases a key slot by clearing
 * its bit in priv->key_entry_inuse.
 *
 * NOTE(review): the switch over 'cmd' (SET_KEY vs DISABLE_KEY), return
 * statements and braces are elided from this extract -- which branch
 * each visible line belongs to must be confirmed in the full file.
 */
1580 static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1581 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
1582 struct ieee80211_key_conf *key)
1584 	struct vnt_private *priv = hw->priv;
1588 		if (vnt_set_keys(hw, sta, vif, key))
/* free the hardware key slot this key occupied */
1592 		if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
1593 			clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
1602 static int vnt_get_stats(struct ieee80211_hw *hw,
1603 struct ieee80211_low_level_stats *stats)
1605 struct vnt_private *priv = hw->priv;
1607 memcpy(stats, &priv->low_stats, sizeof(*stats));
1612 static u64 vnt_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1614 struct vnt_private *priv = hw->priv;
1617 tsf = vt6655_get_current_tsf(priv);
/*
 * vnt_set_tsf - mac80211 .set_tsf callback.
 *
 * Reprograms the next-TBTT timer from the supplied TSF value and the
 * interface's beacon interval.
 *
 * NOTE(review): the trailing parameter line (presumably "u64 tsf)") is
 * elided from this extract.
 */
1622 static void vnt_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1625 	struct vnt_private *priv = hw->priv;
1627 	CARDvUpdateNextTBTT(priv, tsf, vif->bss_conf.beacon_int);
1630 static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1632 struct vnt_private *priv = hw->priv;
1634 /* reset TSF counter */
1635 iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
/*
 * mac80211 operations table wiring the callbacks above into the stack.
 *
 * NOTE(review): the .tx/.start/.stop initialisers are elided from this
 * extract -- confirm they are present in the full file.
 */
1638 static const struct ieee80211_ops vnt_mac_ops = {
1642 	.add_interface = vnt_add_interface,
1643 	.remove_interface = vnt_remove_interface,
1644 	.config = vnt_config,
1645 	.bss_info_changed = vnt_bss_info_changed,
1646 	.prepare_multicast = vnt_prepare_multicast,
1647 	.configure_filter = vnt_configure,
1648 	.set_key = vnt_set_key,
1649 	.get_stats = vnt_get_stats,
1650 	.get_tsf = vnt_get_tsf,
1651 	.set_tsf = vnt_set_tsf,
1652 	.reset_tsf = vnt_reset_tsf,
/*
 * vnt_init - register the device with mac80211.
 *
 * Publishes the permanent MAC address and supported bands, registers
 * the hw with mac80211, marks registration done (priv->mac_hw) and
 * leaves the radio powered off until vnt_start().
 *
 * NOTE(review): the error/success return statements are elided from
 * this extract.
 */
1655 static int vnt_init(struct vnt_private *priv)
1657 	SET_IEEE80211_PERM_ADDR(priv->hw, priv->abyCurrentNetAddr);
1659 	vnt_init_bands(priv);
1661 	if (ieee80211_register_hw(priv->hw))
1664 	priv->mac_hw = true;
1666 	CARDbRadioPowerOff(priv);
/*
 * vt6655_probe - PCI probe entry point.
 *
 * Visible sequence: allocate the ieee80211_hw (with vnt_private as
 * driver data), enable the PCI device and bus mastering, map BAR0,
 * claim the regions, set a 32-bit DMA mask, soft-reset and initialise
 * the MAC (reloading the EEPROM), read the permanent MAC address and
 * RF type, apply module options, describe wiphy capabilities and
 * finally register with mac80211 via vnt_init().
 *
 * NOTE(review): the "static int" return-type line, most "if (...)"
 * failure guards, return statements and closing braces are elided from
 * this extract; each visible device_free_info() is an error-path
 * cleanup.
 */
1672 vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
1674 	struct vnt_private *priv;
1675 	struct ieee80211_hw *hw;
1676 	struct wiphy *wiphy;
1679 	dev_notice(&pcid->dev,
1680 "%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
1682 	dev_notice(&pcid->dev,
1683 "Copyright (c) 2003 VIA Networking Technologies, Inc.\n");
/* priv is embedded in the ieee80211_hw allocation */
1685 	hw = ieee80211_alloc_hw(sizeof(*priv), &vnt_mac_ops);
1687 		dev_err(&pcid->dev, "could not register ieee80211_hw\n");
1694 	spin_lock_init(&priv->lock);
1698 	SET_IEEE80211_DEV(priv->hw, &pcid->dev);
1700 	if (pci_enable_device(pcid)) {
1701 		device_free_info(priv);
1706 "Before get pci_info memaddr is %x\n", priv->memaddr);
1708 	pci_set_master(pcid);
/* BAR0 = memory-mapped registers, BAR1 = I/O ports */
1710 	priv->memaddr = pci_resource_start(pcid, 0);
1711 	priv->ioaddr = pci_resource_start(pcid, 1);
1712 	priv->port_offset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK,
1714 	if (!priv->port_offset) {
1715 		dev_err(&pcid->dev, ": Failed to IO remapping ..\n");
1716 		device_free_info(priv);
1720 	rc = pci_request_regions(pcid, DEVICE_NAME);
1722 		dev_err(&pcid->dev, ": Failed to find PCI device\n");
1723 		device_free_info(priv);
/* descriptor rings use 32-bit DMA addresses only */
1727 	if (dma_set_mask(&pcid->dev, DMA_BIT_MASK(32))) {
1728 		dev_err(&pcid->dev, ": Failed to set dma 32 bit mask\n");
1729 		device_free_info(priv);
1733 	INIT_WORK(&priv->interrupt_work, vnt_interrupt_work);
/* first register access doubles as a sanity check of the mapping */
1736 	if (!MACbSoftwareReset(priv)) {
1737 		dev_err(&pcid->dev, ": Failed to access MAC hardware..\n");
1738 		device_free_info(priv);
1741 	/* initial to reload eeprom */
1742 	MACvInitialize(priv);
1743 	vt6655_mac_read_ether_addr(priv->port_offset, priv->abyCurrentNetAddr);
1746 	priv->byRFType = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_RFTYPE);
1747 	priv->byRFType &= RF_MASK;
1749 	dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);
1751 	device_get_options(priv);
1752 	device_set_options(priv);
1754 	wiphy = priv->hw->wiphy;
1756 	wiphy->frag_threshold = FRAG_THRESH_DEF;
1757 	wiphy->rts_threshold = RTS_THRESH_DEF;
1758 	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1759 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
1761 	ieee80211_hw_set(priv->hw, TIMING_BEACON_ONLY);
1762 	ieee80211_hw_set(priv->hw, SIGNAL_DBM);
1763 	ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
1764 	ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
1765 	ieee80211_hw_set(priv->hw, SUPPORTS_PS);
1767 	priv->hw->max_signal = 100;
1769 	if (vnt_init(priv)) {
1770 		device_free_info(priv);
1774 	device_print_info(priv);
1775 	pci_set_drvdata(pcid, priv);
1780 /*------------------------------------------------------------------*/
/*
 * vt6655_suspend - PM suspend hook.
 *
 * Takes the driver lock around the (mostly elided) shutdown sequence.
 *
 * NOTE(review): the body between lock and unlock (presumably PCI state
 * save / MAC shutdown / device disable) and the return statement are
 * elided from this extract -- confirm against the full file.
 */
1782 static int __maybe_unused vt6655_suspend(struct device *dev_d)
1784 	struct vnt_private *priv = dev_get_drvdata(dev_d);
1785 	unsigned long flags;
1787 	spin_lock_irqsave(&priv->lock, flags);
1791 	spin_unlock_irqrestore(&priv->lock, flags);
/*
 * vt6655_resume - PM resume hook.
 *
 * Disarms the device as a wakeup source; presumably the counterpart of
 * a device_wakeup_enable() in the suspend path (elided here) -- confirm.
 * NOTE(review): the return statement is elided from this extract.
 */
1796 static int __maybe_unused vt6655_resume(struct device *dev_d)
1798 	device_wakeup_disable(dev_d);
/*
 * PCI glue: export the device id table for module autoloading, bundle
 * the suspend/resume callbacks into dev_pm_ops, and register the
 * pci_driver (module_pci_driver generates module init/exit).
 */
1803 MODULE_DEVICE_TABLE(pci, vt6655_pci_id_table);
1805 static SIMPLE_DEV_PM_OPS(vt6655_pm_ops, vt6655_suspend, vt6655_resume);
1807 static struct pci_driver device_driver = {
1808 	.name = DEVICE_NAME,
1809 	.id_table = vt6655_pci_id_table,
1810 	.probe = vt6655_probe,
1811 	.remove = vt6655_remove,
1812 	.driver.pm = &vt6655_pm_ops,
1815 module_pci_driver(device_driver);