1 /******************************************************************************
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 The full GNU General Public License is included in this distribution in the
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************
27 Few modifications for Realtek's Wi-Fi drivers by
28 Andrea Merello <andrea.merello@gmail.com>
30 A special thanks goes to Realtek for their support !
32 ******************************************************************************/
34 #include <linux/compiler.h>
35 #include <linux/errno.h>
36 #include <linux/if_arp.h>
37 #include <linux/in6.h>
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/netdevice.h>
43 #include <linux/pci.h>
44 #include <linux/proc_fs.h>
45 #include <linux/skbuff.h>
46 #include <linux/slab.h>
47 #include <linux/tcp.h>
48 #include <linux/types.h>
49 #include <linux/wireless.h>
50 #include <linux/etherdevice.h>
51 #include <linux/uaccess.h>
52 #include <linux/if_vlan.h>
62 802.11 frame_control for data frames - 2 bytes
63 ,-----------------------------------------------------------------------------------------.
64 bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e |
65 |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
66 val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x |
67 |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
68 desc | ^-ver-^ | ^type-^ | ^-----subtype-----^ | to |from |more |retry| pwr |more |wep |
69 | | | x=0 data,x=1 data+ack | DS | DS |frag | | mgm |data | |
70 '-----------------------------------------------------------------------------------------'
74 ,--------- 'ctrl' expands to >-----------'
76 ,--'---,-------------------------------------------------------------.
77 Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
78 |------|------|---------|---------|---------|------|---------|------|
79 Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
80 | | tion | (BSSID) | | | ence | data | |
81 `--------------------------------------------------| |------'
82 Total: 28 non-data bytes `----.----'
84 .- 'Frame data' expands to <---------------------------'
87 ,---------------------------------------------------.
88 Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
89 |------|------|---------|----------|------|---------|
90 Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
91 | DSAP | SSAP | | | | Packet |
92 | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
93 `-----------------------------------------| |
94 Total: 8 non-data bytes `----.----'
96 .- 'IP Packet' expands, if WEP enabled, to <--'
99 ,-----------------------.
100 Bytes | 4 | 0-2296 | 4 |
101 |-----|-----------|-----|
102 Desc. | IV | Encrypted | ICV |
104 `-----------------------'
105 Total: 8 non-data bytes
108 802.3 Ethernet Data Frame
110 ,-----------------------------------------.
111 Bytes | 6 | 6 | 2 | Variable | 4 |
112 |-------|-------|------|-----------|------|
113 Desc. | Dest. | Source| Type | IP Packet | fcs |
115 `-----------------------------------------'
116 Total: 18 non-data bytes
118 In the event that fragmentation is required, the incoming payload is split into
119 N parts of size ieee->fts. The first fragment contains the SNAP header and the
120 remaining packets are just data.
122 If encryption is enabled, each fragment payload size is reduced by enough space
123 to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP)
124 So if you have 1500 bytes of payload with ieee->fts set to 500 without
125 encryption it will take 3 frames. With WEP it will take 4 frames as the
126 payload of each frame is reduced to 492 bytes.
132 * | ETHERNET HEADER ,-<-- PAYLOAD
133 * | | 14 bytes from skb->data
134 * | 2 bytes for Type --> ,T. | (sizeof ethhdr)
136 * |,-Dest.--. ,--Src.---. | | |
137 * | 6 bytes| | 6 bytes | | | |
140 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
143 * | | | | `T' <---- 2 bytes for Type
145 * | | '---SNAP--' <-------- 6 bytes for SNAP
147 * `-IV--' <-------------------- 4 bytes for IV (WEP)
153 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
154 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
156 inline int rtllib_put_snap(u8 *data, u16 h_proto)
158 struct rtllib_snap_hdr *snap;
161 snap = (struct rtllib_snap_hdr *)data;
166 if (h_proto == 0x8137 || h_proto == 0x80f3)
170 snap->oui[0] = oui[0];
171 snap->oui[1] = oui[1];
172 snap->oui[2] = oui[2];
174 *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
176 return SNAP_SIZE + sizeof(u16);
179 int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
182 struct lib80211_crypt_data *crypt = NULL;
185 crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
187 if (!(crypt && crypt->ops)) {
188 netdev_info(ieee->dev, "=========>%s(), crypt is null\n",
192 /* To encrypt, frame format is:
193 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
195 /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
196 * call both MSDU and MPDU encryption functions from here. */
197 atomic_inc(&crypt->refcnt);
199 if (crypt->ops->encrypt_msdu)
200 res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
201 if (res == 0 && crypt->ops->encrypt_mpdu)
202 res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
204 atomic_dec(&crypt->refcnt);
206 netdev_info(ieee->dev, "%s: Encryption failed: len=%d.\n",
207 ieee->dev->name, frag->len);
208 ieee->ieee_stats.tx_discards++;
216 void rtllib_txb_free(struct rtllib_txb *txb)
223 static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
226 struct rtllib_txb *txb;
229 txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
234 memset(txb, 0, sizeof(struct rtllib_txb));
235 txb->nr_frags = nr_frags;
236 txb->frag_size = cpu_to_le16(txb_size);
238 for (i = 0; i < nr_frags; i++) {
239 txb->fragments[i] = dev_alloc_skb(txb_size);
240 if (unlikely(!txb->fragments[i])) {
244 memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
246 if (unlikely(i != nr_frags)) {
248 dev_kfree_skb_any(txb->fragments[i--]);
255 static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
260 eth = (struct ethhdr *)skb->data;
261 if (eth->h_proto != htons(ETH_P_IP))
264 RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, skb->data, skb->len);
266 switch (ip->tos & 0xfc) {
286 static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
288 struct cb_desc *tcb_desc)
290 struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
291 struct tx_ts_record *pTxTs = NULL;
292 struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;
294 if (rtllib_act_scanning(ieee, false))
297 if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
299 if (!IsQoSDataFrame(skb->data))
301 if (is_multicast_ether_addr(hdr->addr1))
304 if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
307 if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
310 if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
312 if (pHTInfo->bCurrentAMPDUEnable) {
313 if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
314 skb->priority, TX_DIR, true)) {
315 netdev_info(ieee->dev, "%s: can't get TS\n", __func__);
318 if (pTxTs->TxAdmittedBARecord.bValid == false) {
319 if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
322 } else if (tcb_desc->bdhcp == 1) {
324 } else if (!pTxTs->bDisable_AddBa) {
325 TsStartAddBaProcess(ieee, pTxTs);
327 goto FORCED_AGG_SETTING;
328 } else if (pTxTs->bUsingBa == false) {
329 if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
330 (pTxTs->TxCurSeq+1)%4096))
331 pTxTs->bUsingBa = true;
333 goto FORCED_AGG_SETTING;
335 if (ieee->iw_mode == IW_MODE_INFRA) {
336 tcb_desc->bAMPDUEnable = true;
337 tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
338 tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
342 switch (pHTInfo->ForcedAMPDUMode) {
346 case HT_AGG_FORCE_ENABLE:
347 tcb_desc->bAMPDUEnable = true;
348 tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
349 tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
352 case HT_AGG_FORCE_DISABLE:
353 tcb_desc->bAMPDUEnable = false;
354 tcb_desc->ampdu_density = 0;
355 tcb_desc->ampdu_factor = 0;
360 static void rtllib_qurey_ShortPreambleMode(struct rtllib_device *ieee,
361 struct cb_desc *tcb_desc)
363 tcb_desc->bUseShortPreamble = false;
364 if (tcb_desc->data_rate == 2)
366 else if (ieee->current_network.capability &
367 WLAN_CAPABILITY_SHORT_PREAMBLE)
368 tcb_desc->bUseShortPreamble = true;
371 static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
372 struct cb_desc *tcb_desc)
374 struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
376 tcb_desc->bUseShortGI = false;
378 if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
381 if (pHTInfo->bForcedShortGI) {
382 tcb_desc->bUseShortGI = true;
386 if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
387 tcb_desc->bUseShortGI = true;
388 else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
389 tcb_desc->bUseShortGI = true;
392 static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
393 struct cb_desc *tcb_desc)
395 struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
397 tcb_desc->bPacketBW = false;
399 if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
402 if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
405 if ((tcb_desc->data_rate & 0x80) == 0)
407 if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
408 !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
409 tcb_desc->bPacketBW = true;
412 static void rtllib_query_protectionmode(struct rtllib_device *ieee,
413 struct cb_desc *tcb_desc,
416 struct rt_hi_throughput *pHTInfo;
418 tcb_desc->bRTSSTBC = false;
419 tcb_desc->bRTSUseShortGI = false;
420 tcb_desc->bCTSEnable = false;
422 tcb_desc->bRTSBW = false;
424 if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
427 if (is_broadcast_ether_addr(skb->data+16))
430 if (ieee->mode < IEEE_N_24G) {
431 if (skb->len > ieee->rts) {
432 tcb_desc->bRTSEnable = true;
433 tcb_desc->rts_rate = MGN_24M;
434 } else if (ieee->current_network.buseprotection) {
435 tcb_desc->bRTSEnable = true;
436 tcb_desc->bCTSEnable = true;
437 tcb_desc->rts_rate = MGN_24M;
442 pHTInfo = ieee->pHTInfo;
445 if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
446 tcb_desc->bCTSEnable = true;
447 tcb_desc->rts_rate = MGN_24M;
448 tcb_desc->bRTSEnable = true;
450 } else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
451 HT_IOT_ACT_PURE_N_MODE)) {
452 tcb_desc->bRTSEnable = true;
453 tcb_desc->rts_rate = MGN_24M;
456 if (ieee->current_network.buseprotection) {
457 tcb_desc->bRTSEnable = true;
458 tcb_desc->bCTSEnable = true;
459 tcb_desc->rts_rate = MGN_24M;
462 if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
463 u8 HTOpMode = pHTInfo->CurrentOpMode;
465 if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
467 (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
468 tcb_desc->rts_rate = MGN_24M;
469 tcb_desc->bRTSEnable = true;
473 if (skb->len > ieee->rts) {
474 tcb_desc->rts_rate = MGN_24M;
475 tcb_desc->bRTSEnable = true;
478 if (tcb_desc->bAMPDUEnable) {
479 tcb_desc->rts_rate = MGN_24M;
480 tcb_desc->bRTSEnable = false;
485 if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
486 tcb_desc->bUseShortPreamble = true;
487 if (ieee->iw_mode == IW_MODE_MASTER)
491 tcb_desc->bRTSEnable = false;
492 tcb_desc->bCTSEnable = false;
493 tcb_desc->rts_rate = 0;
495 tcb_desc->bRTSBW = false;
499 static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
500 struct cb_desc *tcb_desc)
502 if (ieee->bTxDisableRateFallBack)
503 tcb_desc->bTxDisableRateFallBack = true;
505 if (ieee->bTxUseDriverAssingedRate)
506 tcb_desc->bTxUseDriverAssingedRate = true;
507 if (!tcb_desc->bTxDisableRateFallBack ||
508 !tcb_desc->bTxUseDriverAssingedRate) {
509 if (ieee->iw_mode == IW_MODE_INFRA ||
510 ieee->iw_mode == IW_MODE_ADHOC)
511 tcb_desc->RATRIndex = 0;
515 u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
520 if (is_multicast_ether_addr(dst))
522 if (IsQoSDataFrame(skb->data)) {
523 struct tx_ts_record *pTS = NULL;
525 if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
526 skb->priority, TX_DIR, true))
528 seqnum = pTS->TxCurSeq;
529 pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
535 static int wme_downgrade_ac(struct sk_buff *skb)
537 switch (skb->priority) {
540 skb->priority = 5; /* VO -> VI */
544 skb->priority = 3; /* VI -> BE */
548 skb->priority = 1; /* BE -> BK */
555 static u8 rtllib_current_rate(struct rtllib_device *ieee)
557 if (ieee->mode & IEEE_MODE_MASK)
560 if (ieee->HTCurrentOperaRate)
561 return ieee->HTCurrentOperaRate;
563 return ieee->rate & 0x7F;
566 int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
568 struct rtllib_device *ieee = (struct rtllib_device *)
569 netdev_priv_rsl(dev);
570 struct rtllib_txb *txb = NULL;
571 struct rtllib_hdr_3addrqos *frag_hdr;
572 int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
574 struct net_device_stats *stats = &ieee->stats;
575 int ether_type = 0, encrypt;
576 int bytes, fc, qos_ctl = 0, hdr_len;
577 struct sk_buff *skb_frag;
578 struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
583 u8 dest[ETH_ALEN], src[ETH_ALEN];
584 int qos_actived = ieee->current_network.qos_data.active;
585 struct lib80211_crypt_data *crypt = NULL;
586 struct cb_desc *tcb_desc;
587 u8 bIsMulticast = false;
591 spin_lock_irqsave(&ieee->lock, flags);
593 /* If there is no driver handler to take the TXB, don't bother
595 if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
596 IEEE_SOFTMAC_TX_QUEUE)) ||
597 ((!ieee->softmac_data_hard_start_xmit &&
598 (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
599 netdev_warn(ieee->dev, "No xmit handler.\n");
604 if (likely(ieee->raw_tx == 0)) {
605 if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
606 netdev_warn(ieee->dev, "skb too small (%d).\n",
610 /* Save source and destination addresses */
611 memcpy(dest, skb->data, ETH_ALEN);
612 memcpy(src, skb->data+ETH_ALEN, ETH_ALEN);
614 memset(skb->cb, 0, sizeof(skb->cb));
615 ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
617 if (ieee->iw_mode == IW_MODE_MONITOR) {
618 txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
619 if (unlikely(!txb)) {
620 netdev_warn(ieee->dev,
621 "Could not allocate TXB\n");
626 txb->payload_size = cpu_to_le16(skb->len);
627 memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
633 if (skb->len > 282) {
634 if (ETH_P_IP == ether_type) {
635 const struct iphdr *ip = (struct iphdr *)
636 ((u8 *)skb->data+14);
637 if (IPPROTO_UDP == ip->protocol) {
640 udp = (struct udphdr *)((u8 *)ip +
642 if (((((u8 *)udp)[1] == 68) &&
643 (((u8 *)udp)[3] == 67)) ||
644 ((((u8 *)udp)[1] == 67) &&
645 (((u8 *)udp)[3] == 68))) {
647 ieee->LPSDelayCnt = 200;
650 } else if (ETH_P_ARP == ether_type) {
651 netdev_info(ieee->dev,
652 "=================>DHCP Protocol start tx ARP pkt!!\n");
655 ieee->current_network.tim.tim_count;
659 skb->priority = rtllib_classify(skb, IsAmsdu);
660 crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
661 encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
662 ieee->host_encrypt && crypt && crypt->ops;
663 if (!encrypt && ieee->ieee802_1x &&
664 ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
668 if (crypt && !encrypt && ether_type == ETH_P_PAE) {
669 struct eapol *eap = (struct eapol *)(skb->data +
670 sizeof(struct ethhdr) - SNAP_SIZE -
672 RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
673 eap_get_type(eap->type));
676 /* Advance the SKB to the start of the payload */
677 skb_pull(skb, sizeof(struct ethhdr));
679 /* Determine total amount of storage required for TXB packets */
680 bytes = skb->len + SNAP_SIZE + sizeof(u16);
683 fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
685 fc = RTLLIB_FTYPE_DATA;
688 fc |= RTLLIB_STYPE_QOS_DATA;
690 fc |= RTLLIB_STYPE_DATA;
692 if (ieee->iw_mode == IW_MODE_INFRA) {
693 fc |= RTLLIB_FCTL_TODS;
694 /* To DS: Addr1 = BSSID, Addr2 = SA,
696 memcpy(&header.addr1, ieee->current_network.bssid,
698 memcpy(&header.addr2, &src, ETH_ALEN);
700 memcpy(&header.addr3,
701 ieee->current_network.bssid, ETH_ALEN);
703 memcpy(&header.addr3, &dest, ETH_ALEN);
704 } else if (ieee->iw_mode == IW_MODE_ADHOC) {
705 /* not From/To DS: Addr1 = DA, Addr2 = SA,
707 memcpy(&header.addr1, dest, ETH_ALEN);
708 memcpy(&header.addr2, src, ETH_ALEN);
709 memcpy(&header.addr3, ieee->current_network.bssid,
713 bIsMulticast = is_multicast_ether_addr(header.addr1);
715 header.frame_ctl = cpu_to_le16(fc);
717 /* Determine fragmentation size based on destination (multicast
718 * and broadcast are not fragmented) */
720 frag_size = MAX_FRAG_THRESHOLD;
721 qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
723 frag_size = ieee->fts;
728 hdr_len = RTLLIB_3ADDR_LEN + 2;
730 /* in case we are a client verify acm is not set for this ac */
731 while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
732 netdev_info(ieee->dev, "skb->priority = %x\n",
734 if (wme_downgrade_ac(skb))
736 netdev_info(ieee->dev, "converted skb->priority = %x\n",
739 qos_ctl |= skb->priority;
740 header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
742 hdr_len = RTLLIB_3ADDR_LEN;
744 /* Determine amount of payload per fragment. Regardless of if
745 * this stack is providing the full 802.11 header, one will
746 * eventually be affixed to this fragment -- so we must account
747 * for it when determining the amount of payload space. */
748 bytes_per_frag = frag_size - hdr_len;
750 (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
751 bytes_per_frag -= RTLLIB_FCS_LEN;
753 /* Each fragment may need to have room for encrypting
756 bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
757 crypt->ops->extra_mpdu_postfix_len +
758 crypt->ops->extra_msdu_prefix_len +
759 crypt->ops->extra_msdu_postfix_len;
761 /* Number of fragments is the total bytes_per_frag /
762 * payload_per_fragment */
763 nr_frags = bytes / bytes_per_frag;
764 bytes_last_frag = bytes % bytes_per_frag;
768 bytes_last_frag = bytes_per_frag;
770 /* When we allocate the TXB we allocate enough space for the
771 * reserve and full fragment bytes (bytes_per_frag doesn't
772 * include prefix, postfix, header, FCS, etc.) */
773 txb = rtllib_alloc_txb(nr_frags, frag_size +
774 ieee->tx_headroom, GFP_ATOMIC);
775 if (unlikely(!txb)) {
776 netdev_warn(ieee->dev, "Could not allocate TXB\n");
779 txb->encrypted = encrypt;
780 txb->payload_size = cpu_to_le16(bytes);
783 txb->queue_index = UP2AC(skb->priority);
785 txb->queue_index = WME_AC_BE;
787 for (i = 0; i < nr_frags; i++) {
788 skb_frag = txb->fragments[i];
789 tcb_desc = (struct cb_desc *)(skb_frag->cb +
792 skb_frag->priority = skb->priority;
793 tcb_desc->queue_index = UP2AC(skb->priority);
795 skb_frag->priority = WME_AC_BE;
796 tcb_desc->queue_index = WME_AC_BE;
798 skb_reserve(skb_frag, ieee->tx_headroom);
801 if (ieee->hwsec_active)
802 tcb_desc->bHwSec = 1;
804 tcb_desc->bHwSec = 0;
805 skb_reserve(skb_frag,
806 crypt->ops->extra_mpdu_prefix_len +
807 crypt->ops->extra_msdu_prefix_len);
809 tcb_desc->bHwSec = 0;
811 frag_hdr = (struct rtllib_hdr_3addrqos *)
812 skb_put(skb_frag, hdr_len);
813 memcpy(frag_hdr, &header, hdr_len);
815 /* If this is not the last fragment, then add the
816 * MOREFRAGS bit to the frame control */
817 if (i != nr_frags - 1) {
818 frag_hdr->frame_ctl = cpu_to_le16(
819 fc | RTLLIB_FCTL_MOREFRAGS);
820 bytes = bytes_per_frag;
823 /* The last fragment has the remaining length */
824 bytes = bytes_last_frag;
826 if ((qos_actived) && (!bIsMulticast)) {
828 cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
831 cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i);
834 cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
836 /* Put a SNAP header on the first fragment */
839 skb_put(skb_frag, SNAP_SIZE +
840 sizeof(u16)), ether_type);
841 bytes -= SNAP_SIZE + sizeof(u16);
844 memcpy(skb_put(skb_frag, bytes), skb->data, bytes);
846 /* Advance the SKB... */
847 skb_pull(skb, bytes);
849 /* Encryption routine will move the header forward in
850 * order to insert the IV between the header and the
853 rtllib_encrypt_fragment(ieee, skb_frag,
856 (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
857 skb_put(skb_frag, 4);
860 if ((qos_actived) && (!bIsMulticast)) {
861 if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
862 ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
864 ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
866 if (ieee->seq_ctrl[0] == 0xFFF)
867 ieee->seq_ctrl[0] = 0;
872 if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
873 netdev_warn(ieee->dev, "skb too small (%d).\n",
878 txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
880 netdev_warn(ieee->dev, "Could not allocate TXB\n");
885 txb->payload_size = cpu_to_le16(skb->len);
886 memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
892 struct cb_desc *tcb_desc = (struct cb_desc *)
893 (txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
894 tcb_desc->bTxEnableFwCalcDur = 1;
895 tcb_desc->priority = skb->priority;
897 if (ether_type == ETH_P_PAE) {
898 if (ieee->pHTInfo->IOTAction &
899 HT_IOT_ACT_WA_IOT_Broadcom) {
900 tcb_desc->data_rate =
901 MgntQuery_TxRateExcludeCCKRates(ieee);
902 tcb_desc->bTxDisableRateFallBack = false;
904 tcb_desc->data_rate = ieee->basic_rate;
905 tcb_desc->bTxDisableRateFallBack = 1;
909 tcb_desc->RATRIndex = 7;
910 tcb_desc->bTxUseDriverAssingedRate = 1;
912 if (is_multicast_ether_addr(header.addr1))
913 tcb_desc->bMulticast = 1;
914 if (is_broadcast_ether_addr(header.addr1))
915 tcb_desc->bBroadcast = 1;
916 rtllib_txrate_selectmode(ieee, tcb_desc);
917 if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
918 tcb_desc->data_rate = ieee->basic_rate;
920 tcb_desc->data_rate = rtllib_current_rate(ieee);
923 if (ieee->pHTInfo->IOTAction &
924 HT_IOT_ACT_WA_IOT_Broadcom) {
925 tcb_desc->data_rate =
926 MgntQuery_TxRateExcludeCCKRates(ieee);
927 tcb_desc->bTxDisableRateFallBack = false;
929 tcb_desc->data_rate = MGN_1M;
930 tcb_desc->bTxDisableRateFallBack = 1;
934 tcb_desc->RATRIndex = 7;
935 tcb_desc->bTxUseDriverAssingedRate = 1;
939 rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
940 rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
942 rtllib_query_HTCapShortGI(ieee, tcb_desc);
943 rtllib_query_BandwidthMode(ieee, tcb_desc);
944 rtllib_query_protectionmode(ieee, tcb_desc,
948 spin_unlock_irqrestore(&ieee->lock, flags);
949 dev_kfree_skb_any(skb);
951 if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
952 dev->stats.tx_packets++;
953 dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
954 rtllib_softmac_xmit(txb, ieee);
956 if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
958 stats->tx_bytes += le16_to_cpu(txb->payload_size);
961 rtllib_txb_free(txb);
968 spin_unlock_irqrestore(&ieee->lock, flags);
969 netif_stop_queue(dev);
974 int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
976 memset(skb->cb, 0, sizeof(skb->cb));
977 return rtllib_xmit_inter(skb, dev);
979 EXPORT_SYMBOL(rtllib_xmit);