3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
/* Number of attempts to boot the firmware before giving up.
 * (Was defined twice; identical redefinition is legal C but redundant.) */
#define WL1271_BOOT_RETRIES 3
/* Module parameters; fwlog_param is parsed in wlcore_adjust_conf(),
 * bug_on_recovery/no_recovery are consulted in wl1271_recovery_work(). */
58 static char *fwlog_param;
59 static bool bug_on_recovery;
60 static bool no_recovery;
/* Forward declarations for functions defined later in this file. */
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wl1271_op_stop(struct ieee80211_hw *hw);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Mark a STA link authorized: send the peer-state command for the
 * station HLID, then end the remain-on-channel (croc) for this role.
 * Only valid for STA-type BSS; bails out if not yet associated or if
 * the state command was already sent (test_and_set_bit guard).
 * NOTE(review): interior lines appear elided in this view of the file.
 */
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
86 wl12xx_croc(wl, wlvif->role_id);
88 wl1271_info("Association completed.");
/*
 * Regulatory-domain change notifier: walk the 5 GHz band and force
 * radar-detection channels to passive scan / no IBSS.
 */
92 static int wl1271_reg_notify(struct wiphy *wiphy,
93 struct regulatory_request *request)
95 struct ieee80211_supported_band *band;
96 struct ieee80211_channel *ch;
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
/* Radar channels must not be used for active scan or IBSS. */
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
/* Enable/disable RX streaming via ACX and mirror the result into the
 * WLVIF_FLAG_RX_STREAMING_STARTED bit. Caller must hold wl->mutex. */
114 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
119 /* we should hold wl->mutex */
120 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
133 * this function is being called when the rx_streaming interval
134 * has been changed or rx_streaming should be disabled
136 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
139 int period = wl->conf.rx_streaming.interval;
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
145 /* reconfigure/disable according to new streaming_period */
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
148 (wl->conf.rx_streaming.always ||
149 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 ret = wl1271_set_rx_streaming(wl, wlvif, true);
152 ret = wl1271_set_rx_streaming(wl, wlvif, false);
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Work item: enable RX streaming (if associated and allowed by config),
 * then arm rx_streaming_timer to stop it after a period of inactivity.
 * Runs with wl->mutex held; wakes the chip from ELP around the ACX call.
 */
160 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
163 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
164 rx_streaming_enable_work);
165 struct wl1271 *wl = wlvif->wl;
167 mutex_lock(&wl->mutex);
/* Skip if already started, not associated, or streaming not wanted. */
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
171 (!wl->conf.rx_streaming.always &&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
175 if (!wl->conf.rx_streaming.interval)
178 ret = wl1271_ps_elp_wakeup(wl);
182 ret = wl1271_set_rx_streaming(wl, wlvif, true);
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif->rx_streaming_timer,
188 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
191 wl1271_ps_elp_sleep(wl);
193 mutex_unlock(&wl->mutex);
/*
 * Work item: disable RX streaming if it is currently started.
 * Counterpart of wl1271_rx_streaming_enable_work(); same mutex/ELP
 * wakeup discipline.
 */
196 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
199 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
200 rx_streaming_disable_work);
201 struct wl1271 *wl = wlvif->wl;
203 mutex_lock(&wl->mutex);
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
208 ret = wl1271_ps_elp_wakeup(wl);
212 ret = wl1271_set_rx_streaming(wl, wlvif, false);
217 wl1271_ps_elp_sleep(wl);
219 mutex_unlock(&wl->mutex);
/* Timer callback: defer the actual disable to workqueue context
 * (the disable work takes wl->mutex, which a timer cannot). */
222 static void wl1271_rx_streaming_timer(unsigned long data)
224 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
225 struct wl1271 *wl = wlvif->wl;
226 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
229 /* wl->mutex must be taken */
/* Re-arm the Tx watchdog only while Tx blocks are still allocated in FW;
 * the delayed work fires after conf.tx.tx_watchdog_timeout ms. */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
232 /* if the watchdog is not armed, don't do anything */
233 if (wl->tx_allocated_blocks == 0)
236 cancel_delayed_work(&wl->tx_watchdog_work);
237 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
238 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Tx watchdog: fires when no Tx blocks were freed by the FW within the
 * configured timeout. Re-arms (instead of recovering) in the benign
 * cases — ROC in progress, scan in progress, or AP with connected
 * stations — and otherwise queues full firmware recovery.
 */
241 static void wl12xx_tx_watchdog_work(struct work_struct *work)
243 struct delayed_work *dwork;
246 dwork = container_of(work, struct delayed_work, work);
247 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
249 mutex_lock(&wl->mutex);
251 if (unlikely(wl->state == WL1271_STATE_OFF))
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl->tx_allocated_blocks == 0))
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
262 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
263 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
264 wl->conf.tx.tx_watchdog_timeout);
265 wl12xx_rearm_tx_watchdog_locked(wl);
270 * if a scan is in progress, we might not have any Tx for a long
273 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
274 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
275 wl->conf.tx.tx_watchdog_timeout);
276 wl12xx_rearm_tx_watchdog_locked(wl);
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
283 * Tx is genuinely stuck we will most hopefully discover it when all
284 * stations are removed due to inactivity.
286 if (wl->active_sta_count) {
287 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
289 wl->conf.tx.tx_watchdog_timeout,
290 wl->active_sta_count);
291 wl12xx_rearm_tx_watchdog_locked(wl);
/* No benign explanation found: assume FW is stuck and recover. */
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl->conf.tx.tx_watchdog_timeout);
297 wl12xx_queue_recovery_work(wl);
300 mutex_unlock(&wl->mutex);
/*
 * Apply the "fwlog" module parameter to the runtime configuration.
 * Accepted values: "continuous", "ondemand", "dbgpins", "disable";
 * anything else is reported as an error.
 */
303 static void wlcore_adjust_conf(struct wl1271 *wl)
305 /* Adjust settings according to optional module parameters */
307 if (!strcmp(fwlog_param, "continuous")) {
308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 } else if (!strcmp(fwlog_param, "ondemand")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 } else if (!strcmp(fwlog_param, "dbgpins")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 } else if (!strcmp(fwlog_param, "disable")) {
315 wl->conf.fwlog.mem_blocks = 0;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
/*
 * Regulate host-side (high-level) PS for one AP link, based on the FW
 * per-link PS bitmap and the number of packets pending for the link.
 */
323 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
324 struct wl12xx_vif *wlvif,
327 bool fw_ps, single_sta;
329 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
330 single_sta = (wl->active_sta_count == 1);
333 * Wake up from high level PS if the STA is asleep with too little
334 * packets in FW or if the STA is awake.
336 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
337 wl12xx_ps_link_end(wl, wlvif, hlid);
340 * Start high-level PS if the STA is asleep with enough blocks in FW.
341 * Make an exception if this is the only connected station. In this
342 * case FW-memory congestion is not a problem.
344 else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
345 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * For an AP vif, refresh per-link accounting from the FW status block:
 * update the PS bitmap, recompute allocated packets per HLID from the
 * freed-packet counters, and regulate each link's PS state.
 */
348 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
349 struct wl12xx_vif *wlvif,
350 struct wl_fw_status_2 *status)
352 struct wl1271_link *lnk;
356 /* TODO: also use link_fast_bitmap here */
358 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
359 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
360 wl1271_debug(DEBUG_PSM,
361 "link ps prev 0x%x cur 0x%x changed 0x%x",
362 wl->ap_fw_ps_map, cur_fw_ps_map,
363 wl->ap_fw_ps_map ^ cur_fw_ps_map);
365 wl->ap_fw_ps_map = cur_fw_ps_map;
368 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
369 lnk = &wl->links[hlid];
/* Packets freed since last status read (counter delta). */
370 cnt = status->counters.tx_lnk_free_pkts[hlid] -
371 lnk->prev_freed_pkts;
373 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
374 lnk->allocated_pkts -= cnt;
376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 lnk->allocated_pkts);
/*
 * Read the FW status area from the chip and update host-side Tx block
 * accounting, the Tx watchdog, per-AP-link state, and the host-chipset
 * time offset. Counters from the FW are cumulative, so deltas are
 * computed with explicit wrap-around handling.
 */
381 static void wl12xx_fw_status(struct wl1271 *wl,
382 struct wl_fw_status_1 *status_1,
383 struct wl_fw_status_2 *status_2)
385 struct wl12xx_vif *wlvif;
387 u32 old_tx_blk_count = wl->tx_blocks_available;
388 int avail, freed_blocks;
392 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
393 sizeof(*status_2) + wl->fw_status_priv_len;
395 wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
398 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
399 "drv_rx_counter = %d, tx_results_counter = %d)",
401 status_1->fw_rx_counter,
402 status_1->drv_rx_counter,
403 status_1->tx_results_counter);
405 for (i = 0; i < NUM_TX_QUEUES; i++) {
406 /* prevent wrap-around in freed-packets counter */
407 wl->tx_allocated_pkts[i] -=
408 (status_2->counters.tx_released_pkts[i] -
409 wl->tx_pkts_freed[i]) & 0xff;
411 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
414 /* prevent wrap-around in total blocks counter */
415 if (likely(wl->tx_blocks_freed <=
416 le32_to_cpu(status_2->total_released_blks)))
417 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
/* 32-bit counter wrapped: account for the full 2^32 span. */
420 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
421 le32_to_cpu(status_2->total_released_blks);
423 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
425 wl->tx_allocated_blocks -= freed_blocks;
428 * If the FW freed some blocks:
429 * If we still have allocated blocks - re-arm the timer, Tx is
430 * not stuck. Otherwise, cancel the timer (no Tx currently).
433 if (wl->tx_allocated_blocks)
434 wl12xx_rearm_tx_watchdog_locked(wl);
436 cancel_delayed_work(&wl->tx_watchdog_work);
439 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
442 * The FW might change the total number of TX memblocks before
443 * we get a notification about blocks being released. Thus, the
444 * available blocks calculation might yield a temporary result
445 * which is lower than the actual available blocks. Keeping in
446 * mind that only blocks that were allocated can be moved from
447 * TX to RX, tx_blocks_available should never decrease here.
449 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
452 /* if more blocks are available now, tx work can be scheduled */
453 if (wl->tx_blocks_available > old_tx_blk_count)
454 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
456 /* for AP update num of allocated TX blocks per link and ps status */
457 wl12xx_for_each_wlvif_ap(wl, wlvif) {
458 wl12xx_irq_update_links_status(wl, wlvif, status_2);
461 /* update the host-chipset time offset */
/* >> 10 approximates ns -> us conversion (divide by 1024). */
463 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
464 (s64)le32_to_cpu(status_2->fw_localtime);
/* Drain the deferred RX and TX-status queues into mac80211 (the _ni
 * variants are the non-IRQ/process-context entry points). */
467 static void wl1271_flush_deferred_work(struct wl1271 *wl)
471 /* Pass all received frames to the network stack */
472 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
473 ieee80211_rx_ni(wl->hw, skb);
475 /* Return sent skbs to the network stack */
476 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
477 ieee80211_tx_status_ni(wl->hw, skb);
/* Work item: keep flushing deferred frames until the RX queue is empty
 * (new frames may arrive while flushing, hence the loop). */
480 static void wl1271_netstack_work(struct work_struct *work)
483 container_of(work, struct wl1271, netstack_work);
486 wl1271_flush_deferred_work(wl);
487 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on status-processing iterations per IRQ invocation. */
490 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Threaded IRQ handler. Per iteration: read FW status, complete
 * immediate Tx, then dispatch on the interrupt bits — watchdog
 * (recovery), data (RX + Tx path), event mailboxes A/B, and the
 * informational init-complete / hw-available bits. Edge-triggered
 * platforms are limited to a single iteration to avoid racing the
 * hardirq.
 */
492 static irqreturn_t wl1271_irq(int irq, void *cookie)
496 int loopcount = WL1271_IRQ_MAX_LOOPS;
497 struct wl1271 *wl = (struct wl1271 *)cookie;
499 unsigned int defer_count;
502 /* TX might be handled here, avoid redundant work */
503 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
504 cancel_work_sync(&wl->tx_work);
507 * In case edge triggered interrupt must be used, we cannot iterate
508 * more than once without introducing race conditions with the hardirq.
510 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
513 mutex_lock(&wl->mutex);
515 wl1271_debug(DEBUG_IRQ, "IRQ work");
517 if (unlikely(wl->state == WL1271_STATE_OFF))
520 ret = wl1271_ps_elp_wakeup(wl);
524 while (!done && loopcount--) {
526 * In order to avoid a race with the hardirq, clear the flag
527 * before acknowledging the chip. Since the mutex is held,
528 * wl1271_ps_elp_wakeup cannot be called concurrently.
530 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
531 smp_mb__after_clear_bit();
533 wl12xx_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
535 wlcore_hw_tx_immediate_compl(wl);
537 intr = le32_to_cpu(wl->fw_status_1->intr);
538 intr &= WL1271_INTR_MASK;
544 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
545 wl1271_error("watchdog interrupt received! "
546 "starting recovery.");
547 wl12xx_queue_recovery_work(wl);
549 /* restarting the chip. ignore any other interrupt. */
553 if (likely(intr & WL1271_ACX_INTR_DATA)) {
554 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
556 wl12xx_rx(wl, wl->fw_status_1);
558 /* Check if any tx blocks were freed */
559 spin_lock_irqsave(&wl->wl_lock, flags);
560 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
561 wl1271_tx_total_queue_count(wl) > 0) {
562 spin_unlock_irqrestore(&wl->wl_lock, flags);
564 * In order to avoid starvation of the TX path,
565 * call the work function directly.
567 wl1271_tx_work_locked(wl);
569 spin_unlock_irqrestore(&wl->wl_lock, flags);
572 /* check for tx results */
573 wlcore_hw_tx_delayed_compl(wl);
575 /* Make sure the deferred queues don't get too long */
576 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
577 skb_queue_len(&wl->deferred_rx_queue);
578 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
579 wl1271_flush_deferred_work(wl);
582 if (intr & WL1271_ACX_INTR_EVENT_A) {
583 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
584 wl1271_event_handle(wl, 0);
587 if (intr & WL1271_ACX_INTR_EVENT_B) {
588 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
589 wl1271_event_handle(wl, 1);
592 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
593 wl1271_debug(DEBUG_IRQ,
594 "WL1271_ACX_INTR_INIT_COMPLETE");
596 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
597 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
600 wl1271_ps_elp_sleep(wl);
603 spin_lock_irqsave(&wl->wl_lock, flags);
604 /* In case TX was not handled here, queue TX work */
605 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
606 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
607 wl1271_tx_total_queue_count(wl) > 0)
608 ieee80211_queue_work(wl->hw, &wl->tx_work);
609 spin_unlock_irqrestore(&wl->wl_lock, flags);
611 mutex_unlock(&wl->mutex);
/* Accumulator for counting active interfaces; also records whether the
 * interface of interest (cur_vif) is among the running ones. */
616 struct vif_counter_data {
619 struct ieee80211_vif *cur_vif;
620 bool cur_vif_running;
/* Iterator callback for ieee80211_iterate_active_interfaces(). */
623 static void wl12xx_vif_count_iter(void *data, u8 *mac,
624 struct ieee80211_vif *vif)
626 struct vif_counter_data *counter = data;
629 if (counter->cur_vif == vif)
630 counter->cur_vif_running = true;
633 /* caller must not hold wl->mutex, as it might deadlock */
/* Count active interfaces and flag whether cur_vif is running;
 * results are returned through *data. */
634 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
635 struct ieee80211_vif *cur_vif,
636 struct vif_counter_data *data)
638 memset(data, 0, sizeof(*data));
639 data->cur_vif = cur_vif;
641 ieee80211_iterate_active_interfaces(hw,
642 wl12xx_vif_count_iter, data);
/*
 * Select and load the firmware image: PLT FW in PLT mode, multi-role FW
 * when more than one vif was last active, single-role FW otherwise.
 * Skips the load if the right FW type is already cached; the image is
 * copied into a vmalloc'd buffer (wl->fw) and must be 32-bit aligned
 * in size.
 */
645 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
647 const struct firmware *fw;
649 enum wl12xx_fw_type fw_type;
653 fw_type = WL12XX_FW_TYPE_PLT;
654 fw_name = wl->plt_fw_name;
657 * we can't call wl12xx_get_vif_count() here because
658 * wl->mutex is taken, so use the cached last_vif_count value
660 if (wl->last_vif_count > 1) {
661 fw_type = WL12XX_FW_TYPE_MULTI;
662 fw_name = wl->mr_fw_name;
664 fw_type = WL12XX_FW_TYPE_NORMAL;
665 fw_name = wl->sr_fw_name;
/* Right FW already loaded — nothing to do. */
669 if (wl->fw_type == fw_type)
672 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
674 ret = request_firmware(&fw, fw_name, wl->dev);
677 wl1271_error("could not get firmware %s: %d", fw_name, ret);
682 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* Invalidate fw_type while the buffer is being replaced. */
689 wl->fw_type = WL12XX_FW_TYPE_NONE;
690 wl->fw_len = fw->size;
691 wl->fw = vmalloc(wl->fw_len);
694 wl1271_error("could not allocate memory for the firmware");
699 memcpy(wl->fw, fw->data, wl->fw_len);
701 wl->fw_type = fw_type;
703 release_firmware(fw);
/* Load the NVS (calibration) file from the filesystem into wl->nvs. */
708 static int wl1271_fetch_nvs(struct wl1271 *wl)
710 const struct firmware *fw;
713 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
716 wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
721 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
724 wl1271_error("could not allocate memory for the nvs file");
729 wl->nvs_len = fw->size;
732 release_firmware(fw);
/* Queue FW recovery unless a recovery is already in progress. */
737 void wl12xx_queue_recovery_work(struct wl1271 *wl)
739 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
740 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append FW log data (a length-value encoded list) from memblock into
 * wl->fwlog, capped at one PAGE_SIZE total; returns the copied length.
 */
743 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
747 /* The FW log is a length-value list, find where the log end */
748 while (len < maxlen) {
/* Zero length byte terminates the list. */
749 if (memblock[len] == 0)
751 if (len + memblock[len] + 1 > maxlen)
753 len += memblock[len] + 1;
756 /* Make sure we have enough room */
757 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
759 /* Fill the FW log file, consumed by the sysfs fwlog entry */
760 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
761 wl->fwlog_size += len;
/*
 * On FW panic, walk the firmware's linked list of log memory blocks,
 * copy their contents into the host fwlog buffer, and wake any reader
 * blocked on the sysfs fwlog entry. Only runs in on-demand log mode.
 */
766 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
772 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
773 (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
774 (wl->conf.fwlog.mem_blocks == 0))
777 wl1271_info("Reading FW panic log");
779 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
784 * Make sure the chip is awake and the logger isn't active.
785 * This might fail if the firmware hanged.
787 if (!wl1271_ps_elp_wakeup(wl))
788 wl12xx_cmd_stop_fwlog(wl);
790 /* Read the first memory block address */
791 wl12xx_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
792 first_addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
796 /* Traverse the memory blocks linked list */
799 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
800 wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
804 * Memory blocks are linked to one another. The first 4 bytes
805 * of each memory block hold the hardware address of the next
806 * one. The last memory block points to the first one.
808 addr = le32_to_cpup((__le32 *)block);
809 if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
810 WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
/* Stop when the list loops back to the first block. */
812 } while (addr && (addr != first_addr));
814 wake_up_interruptible(&wl->fwlog_waitq);
/*
 * Full firmware recovery: dump the panic log and FW program counter,
 * honour the bug_on_recovery / no_recovery module parameters, bump Tx
 * security sequence numbers, tear down all interfaces, stop the device
 * and ask mac80211 to restart the hardware.
 */
820 static void wl1271_recovery_work(struct work_struct *work)
823 container_of(work, struct wl1271, recovery_work);
824 struct wl12xx_vif *wlvif;
825 struct ieee80211_vif *vif;
827 mutex_lock(&wl->mutex);
829 if (wl->state != WL1271_STATE_ON || wl->plt)
832 /* Avoid a recursive recovery */
833 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
835 wl12xx_read_fwlog_panic(wl);
837 /* change partitions momentarily so we can read the FW pc */
838 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
839 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x "
842 wlcore_read_reg(wl, REG_PC_ON_RECOVERY),
843 wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR),
844 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
846 BUG_ON(bug_on_recovery &&
847 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
850 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
851 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
855 BUG_ON(bug_on_recovery);
858 * Advance security sequence number to overcome potential progress
859 * in the firmware during recovery. This doesn't hurt if the network is
862 wl12xx_for_each_wlvif(wl, wlvif) {
863 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
864 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
865 wlvif->tx_security_seq +=
866 WL1271_TX_SQN_POST_RECOVERY_PADDING;
869 /* Prevent spurious TX during FW restart */
870 ieee80211_stop_queues(wl->hw);
872 if (wl->sched_scanning) {
873 ieee80211_sched_scan_stopped(wl->hw);
874 wl->sched_scanning = false;
877 /* reboot the chipset */
878 while (!list_empty(&wl->wlvif_list)) {
879 wlvif = list_first_entry(&wl->wlvif_list,
880 struct wl12xx_vif, list);
881 vif = wl12xx_wlvif_to_vif(wlvif);
882 __wl1271_op_remove_interface(wl, vif, false);
/* op_stop takes wl->mutex itself, so drop it first. */
884 mutex_unlock(&wl->mutex);
885 wl1271_op_stop(wl->hw);
887 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
889 ieee80211_restart_hw(wl->hw);
892 * Its safe to enable TX now - the queues are stopped after a request
895 ieee80211_wake_queues(wl->hw);
898 mutex_unlock(&wl->mutex);
/* Wake the FW from ELP by writing WAKE_UP to the ELP control register. */
901 static void wl1271_fw_wakeup(struct wl1271 *wl)
903 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the FW status buffer (status_1 and status_2 live in one
 * contiguous allocation; status_2 is a pointer into it) and the Tx
 * result interface buffer. Frees fw_status_1 if the second allocation
 * fails.
 */
906 static int wl1271_setup(struct wl1271 *wl)
908 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
909 sizeof(*wl->fw_status_2) +
910 wl->fw_status_priv_len, GFP_KERNEL);
911 if (!wl->fw_status_1)
914 wl->fw_status_2 = (struct wl_fw_status_2 *)
915 (((u8 *) wl->fw_status_1) +
916 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
918 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
919 if (!wl->tx_res_if) {
920 kfree(wl->fw_status_1);
/* Power the chip on (with the required pre/post sleeps), select the
 * boot partition and wake the ELP module. */
927 static int wl12xx_set_power_on(struct wl1271 *wl)
931 msleep(WL1271_PRE_POWER_ON_SLEEP);
932 ret = wl1271_power_on(wl);
935 msleep(WL1271_POWER_ON_SLEEP);
939 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
941 /* ELP module wake up */
942 wl1271_fw_wakeup(wl);
/*
 * Power-on and prepare the chip for boot: set the bus block size,
 * allocate host buffers, fetch the firmware image and, if no NVS was
 * provided via netlink, load it from the filesystem.
 */
948 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
952 ret = wl12xx_set_power_on(wl);
957 * For wl127x based devices we could use the default block
958 * size (512 bytes), but due to a bug in the sdio driver, we
959 * need to set it explicitly after the chip is powered on. To
960 * simplify the code and since the performance impact is
961 * negligible, we use the same block size for all different
964 if (wl1271_set_block_size(wl))
965 wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
967 /* TODO: make sure the lower driver has set things up correctly */
969 ret = wl1271_setup(wl);
973 ret = wl12xx_fetch_firmware(wl, plt);
977 /* No NVS from netlink, try to get it from the filesystem */
978 if (wl->nvs == NULL) {
979 ret = wl1271_fetch_nvs(wl);
988 int wl1271_plt_start(struct wl1271 *wl)
990 int retries = WL1271_BOOT_RETRIES;
991 struct wiphy *wiphy = wl->hw->wiphy;
994 mutex_lock(&wl->mutex);
996 wl1271_notice("power up");
998 if (wl->state != WL1271_STATE_OFF) {
999 wl1271_error("cannot go into PLT state because not "
1000 "in off state: %d", wl->state);
1007 ret = wl12xx_chip_wakeup(wl, true);
1011 ret = wl->ops->plt_init(wl);
1016 wl->state = WL1271_STATE_ON;
1017 wl1271_notice("firmware booted in PLT mode (%s)",
1018 wl->chip.fw_ver_str);
1020 /* update hw/fw version info in wiphy struct */
1021 wiphy->hw_version = wl->chip.id;
1022 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1023 sizeof(wiphy->fw_version));
1028 wl1271_power_off(wl);
1031 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1032 WL1271_BOOT_RETRIES);
1034 mutex_unlock(&wl->mutex);
1039 int wl1271_plt_stop(struct wl1271 *wl)
1043 wl1271_notice("power down");
1046 * Interrupts must be disabled before setting the state to OFF.
1047 * Otherwise, the interrupt handler might be called and exit without
1048 * reading the interrupt status.
1050 wlcore_disable_interrupts(wl);
1051 mutex_lock(&wl->mutex);
1053 mutex_unlock(&wl->mutex);
1056 * This will not necessarily enable interrupts as interrupts
1057 * may have been disabled when op_stop was called. It will,
1058 * however, balance the above call to disable_interrupts().
1060 wlcore_enable_interrupts(wl);
1062 wl1271_error("cannot power down because not in PLT "
1063 "state: %d", wl->state);
1068 mutex_unlock(&wl->mutex);
1070 wl1271_flush_deferred_work(wl);
1071 cancel_work_sync(&wl->netstack_work);
1072 cancel_work_sync(&wl->recovery_work);
1073 cancel_delayed_work_sync(&wl->elp_work);
1074 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1075 cancel_delayed_work_sync(&wl->connection_loss_work);
1077 mutex_lock(&wl->mutex);
1078 wl1271_power_off(wl);
1080 wl->state = WL1271_STATE_OFF;
1083 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback: map the skb to a queue and link (HLID), queue
 * it on the per-link Tx queue, apply the high-watermark backpressure,
 * and kick tx_work unless Tx is already being handled in the IRQ path.
 * Frames with an invalid/unknown link are dropped.
 */
1089 static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1091 struct wl1271 *wl = hw->priv;
1092 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1093 struct ieee80211_vif *vif = info->control.vif;
1094 struct wl12xx_vif *wlvif = NULL;
1095 unsigned long flags;
1100 wlvif = wl12xx_vif_to_data(vif);
1102 mapping = skb_get_queue_mapping(skb);
1103 q = wl1271_tx_get_queue(mapping);
1105 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
1107 spin_lock_irqsave(&wl->wl_lock, flags);
1109 /* queue the packet */
1110 if (hlid == WL12XX_INVALID_LINK_ID ||
1111 (wlvif && !test_bit(hlid, wlvif->links_map))) {
1112 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1113 ieee80211_free_txskb(hw, skb);
1117 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1119 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1121 wl->tx_queue_count[q]++;
1124 * The workqueue is slow to process the tx_queue and we need stop
1125 * the queue here, otherwise the queue will get too long.
1127 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
1128 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1129 ieee80211_stop_queue(wl->hw, mapping);
1130 set_bit(q, &wl->stopped_queues_map);
1134 * The chip specific setup must run before the first TX packet -
1135 * before that, the tx_work will not be initialized!
1138 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1139 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1140 ieee80211_queue_work(wl->hw, &wl->tx_work);
1143 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet (requested by the FW when it is
 * low on RX memory blocks) and push it out immediately if the FW Tx
 * path is not busy. No-op if a dummy packet is already pending.
 */
1146 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1148 unsigned long flags;
1151 /* no need to queue a new dummy packet if one is already pending */
1152 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1155 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1157 spin_lock_irqsave(&wl->wl_lock, flags);
1158 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1159 wl->tx_queue_count[q]++;
1160 spin_unlock_irqrestore(&wl->wl_lock, flags);
1162 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1163 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1164 wl1271_tx_work_locked(wl);
1167 * If the FW TX is busy, TX work will be scheduled by the threaded
1168 * interrupt handler function
1174 * The size of the dummy packet should be at least 1400 bytes. However, in
1175 * order to minimize the number of bus transactions, aligning it to 512 bytes
1176 * boundaries could be beneficial, performance wise
1178 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the dummy packet skb: a zeroed QoS-less data/NULLFUNC frame
 * padded out to TOTAL_TX_DUMMY_PACKET_SIZE (minus the Tx HW descriptor,
 * for which headroom is reserved), tagged with the management TID.
 */
1180 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1182 struct sk_buff *skb;
1183 struct ieee80211_hdr_3addr *hdr;
1184 unsigned int dummy_packet_size;
1186 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1187 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1189 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1191 wl1271_warning("Failed to allocate a dummy packet skb");
1195 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1197 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1198 memset(hdr, 0, sizeof(*hdr));
1199 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1200 IEEE80211_STYPE_NULLFUNC |
1201 IEEE80211_FCTL_TODS);
1203 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1205 /* Dummy packets require the TID to be management */
1206 skb->priority = WL1271_TID_MGMT;
1208 /* Initialize all fields that might be used */
1209 skb_set_queue_mapping(skb, 0);
1210 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate that a WoWLAN trigger pattern can be expressed as FW RX
 * filter fields: at most WL1271_RX_FILTER_MAX_FIELDS segments and a
 * total encoded size within WL1271_RX_FILTER_MAX_FIELDS_SIZE.
 */
1218 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1220 int num_fields = 0, in_field = 0, fields_size = 0;
1221 int i, pattern_len = 0;
1224 wl1271_warning("No mask in WoWLAN pattern");
1229 * The pattern is broken up into segments of bytes at different offsets
1230 * that need to be checked by the FW filter. Each segment is called
1231 * a field in the FW API. We verify that the total number of fields
1232 * required for this pattern won't exceed FW limits (8)
1233 * as well as the total fields buffer won't exceed the FW limit.
1234 * Note that if there's a pattern which crosses Ethernet/IP header
1235 * boundary a new field is required.
1237 for (i = 0; i < p->pattern_len; i++) {
1238 if (test_bit(i, (unsigned long *)p->mask)) {
/* A field may not straddle the Ethernet/IP header boundary. */
1243 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1245 fields_size += pattern_len +
1246 RX_FILTER_FIELD_OVERHEAD;
1254 fields_size += pattern_len +
1255 RX_FILTER_FIELD_OVERHEAD;
1262 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1266 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1267 wl1271_warning("RX Filter too complex. Too many segments");
1271 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1272 wl1271_warning("RX filter pattern is too big");
/* Allocate a zeroed RX filter; free with wl1271_rx_filter_free(). */
1279 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1281 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/* Free an RX filter, including each field's owned pattern buffer. */
1284 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1291 for (i = 0; i < filter->num_fields; i++)
1292 kfree(filter->fields[i].pattern);
/*
 * Append a field (offset + flags + pattern bytes) to an RX filter.
 * The pattern is copied into a newly allocated buffer owned by the
 * filter; fails when the field table is already full.
 */
1297 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1298 u16 offset, u8 flags,
1299 u8 *pattern, u8 len)
1301 struct wl12xx_rx_filter_field *field;
1303 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1304 wl1271_warning("Max fields per RX filter. can't alloc another");
1308 field = &filter->fields[filter->num_fields];
1310 field->pattern = kzalloc(len, GFP_KERNEL);
1311 if (!field->pattern) {
1312 wl1271_warning("Failed to allocate RX filter pattern");
1316 filter->num_fields++;
1318 field->offset = cpu_to_le16(offset);
1319 field->flags = flags;
1321 memcpy(field->pattern, pattern, len);
/* Total serialized size of all fields (struct minus the pattern
 * pointer, plus the pattern bytes themselves). */
1326 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1328 int i, fields_size = 0;
1330 for (i = 0; i < filter->num_fields; i++)
1331 fields_size += filter->fields[i].len +
1332 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into a flat buffer for the FW: each
 * record is the field struct (minus the host pattern pointer) followed
 * inline by the pattern bytes. Buffer must be sized via
 * wl1271_rx_filter_get_fields_size().
 */
1338 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1342 struct wl12xx_rx_filter_field *field;
1344 for (i = 0; i < filter->num_fields; i++) {
1345 field = (struct wl12xx_rx_filter_field *)buf;
1347 field->offset = filter->fields[i].offset;
1348 field->flags = filter->fields[i].flags;
1349 field->len = filter->fields[i].len;
1351 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1352 buf += sizeof(struct wl12xx_rx_filter_field) -
1353 sizeof(u8 *) + field->len;
1358 * Allocates an RX filter returned through f
1359 * which needs to be freed using rx_filter_free()
/*
 * Translate a cfg80211 WoWLAN pattern into an RX filter: each masked-in
 * run of bytes becomes one field, split at the Ethernet/IP header
 * boundary, flagged as ETHERNET_HEADER or IP_HEADER accordingly.
 * On any failure the partially built filter is freed.
 */
1361 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1362 struct cfg80211_wowlan_trig_pkt_pattern *p,
1363 struct wl12xx_rx_filter **f)
1366 struct wl12xx_rx_filter *filter;
1370 filter = wl1271_rx_filter_alloc();
1372 wl1271_warning("Failed to alloc rx filter");
1378 while (i < p->pattern_len) {
/* Skip bytes not covered by the mask. */
1379 if (!test_bit(i, (unsigned long *)p->mask)) {
1384 for (j = i; j < p->pattern_len; j++) {
1385 if (!test_bit(j, (unsigned long *)p->mask))
1388 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1389 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1393 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1395 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
/* IP-header fields use offsets relative to the IP header start. */
1397 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1398 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1403 ret = wl1271_rx_filter_alloc_field(filter,
1406 &p->pattern[i], len);
1413 filter->action = FILTER_SIGNAL;
1419 wl1271_rx_filter_free(filter);
/*
 * Program the firmware's WoWLAN RX filtering from mac80211's wowlan config.
 *
 * With no config (or "any" wakeup, or zero patterns): restore the default
 * pass-all behavior and clear any previously installed filters.
 * Otherwise: validate every pattern first (so FW state is only torn down
 * once all patterns are known-good), clear existing filters, install one
 * RX filter per pattern, and finally flip the default action to FILTER_DROP
 * so only filter-matching packets wake the host.
 */
1425 static int wl1271_configure_wowlan(struct wl1271 *wl,
1426 struct cfg80211_wowlan *wow)
1430 if (!wow || wow->any || !wow->n_patterns) {
1431 wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1432 wl1271_rx_filter_clear_all(wl);
1436 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1439 /* Validate all incoming patterns before clearing current FW state */
1440 for (i = 0; i < wow->n_patterns; i++) {
1441 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1443 wl1271_warning("Bad wowlan pattern %d", i);
1448 wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1449 wl1271_rx_filter_clear_all(wl);
1451 /* Translate WoWLAN patterns into filters */
1452 for (i = 0; i < wow->n_patterns; i++) {
1453 struct cfg80211_wowlan_trig_pkt_pattern *p;
1454 struct wl12xx_rx_filter *filter = NULL;
1456 p = &wow->patterns[i];
1458 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1460 wl1271_warning("Failed to create an RX filter from "
1461 "wowlan pattern %d", i);
/* FW owns a copy after enable; free our local filter either way */
1465 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1467 wl1271_rx_filter_free(filter);
1472 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Suspend-time configuration for a STA vif: no-op unless associated.
 * Wakes the chip from ELP, installs the WoWLAN filters, and switches the
 * firmware wake-up conditions to the (longer) suspend listen interval,
 * then lets the chip return to ELP.
 */
1478 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1479 struct wl12xx_vif *wlvif,
1480 struct cfg80211_wowlan *wow)
1484 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1487 ret = wl1271_ps_elp_wakeup(wl);
1491 wl1271_configure_wowlan(wl, wow);
1492 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1493 wl->conf.conn.suspend_wake_up_event,
1494 wl->conf.conn.suspend_listen_interval);
1497 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1499 wl1271_ps_elp_sleep(wl);
/*
 * Suspend-time configuration for an AP vif: no-op unless the AP is started.
 * Enables beacon filtering for the suspend period (undone on resume by
 * wl1271_configure_resume()).
 */
1506 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1507 struct wl12xx_vif *wlvif)
1511 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1514 ret = wl1271_ps_elp_wakeup(wl);
1518 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1520 wl1271_ps_elp_sleep(wl);
/*
 * Dispatch suspend configuration by bss type; other vif types (e.g. a
 * device role) need no suspend-specific setup.
 */
1526 static int wl1271_configure_suspend(struct wl1271 *wl,
1527 struct wl12xx_vif *wlvif,
1528 struct cfg80211_wowlan *wow)
1530 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1531 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1532 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1533 return wl1271_configure_suspend_ap(wl, wlvif);
/*
 * Undo wl1271_configure_suspend() on resume: for STA, remove the WoWLAN
 * filters and restore the normal wake-up conditions / listen interval;
 * for AP, disable the suspend-time beacon filtering. No-op for other
 * vif types. Errors are logged but not propagated (void return).
 */
1537 static void wl1271_configure_resume(struct wl1271 *wl,
1538 struct wl12xx_vif *wlvif)
1541 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1542 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1544 if ((!is_ap) && (!is_sta))
1547 ret = wl1271_ps_elp_wakeup(wl);
1552 wl1271_configure_wowlan(wl, NULL);
1554 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1555 wl->conf.conn.wake_up_event,
1556 wl->conf.conn.listen_interval);
1559 wl1271_error("resume: wake up conditions failed: %d",
1563 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1566 wl1271_ps_elp_sleep(wl);
/*
 * mac80211 suspend callback. Flushes TX, configures each vif for suspend
 * under wl->mutex, then quiesces the driver: interrupts are disabled,
 * WL1271_FLAG_SUSPENDED is set (blocking new threaded-irq scheduling),
 * interrupts are re-enabled, and outstanding work items are flushed so
 * nothing runs while the host sleeps.
 */
1569 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1570 struct cfg80211_wowlan *wow)
1572 struct wl1271 *wl = hw->priv;
1573 struct wl12xx_vif *wlvif;
1576 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1579 wl1271_tx_flush(wl);
1581 mutex_lock(&wl->mutex);
1582 wl->wow_enabled = true;
1583 wl12xx_for_each_wlvif(wl, wlvif) {
1584 ret = wl1271_configure_suspend(wl, wlvif, wow);
/* per-vif failure aborts the whole suspend */
1586 mutex_unlock(&wl->mutex);
1587 wl1271_warning("couldn't prepare device to suspend");
1591 mutex_unlock(&wl->mutex);
1592 /* flush any remaining work */
1593 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1596 * disable and re-enable interrupts in order to flush
1599 wlcore_disable_interrupts(wl);
1602 * set suspended flag to avoid triggering a new threaded_irq
1603 * work. no need for spinlock as interrupts are disabled.
1605 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1607 wlcore_enable_interrupts(wl);
1608 flush_work(&wl->tx_work);
1609 flush_delayed_work(&wl->elp_work);
1614 static int wl1271_op_resume(struct ieee80211_hw *hw)
1616 struct wl1271 *wl = hw->priv;
1617 struct wl12xx_vif *wlvif;
1618 unsigned long flags;
1619 bool run_irq_work = false;
1621 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1623 WARN_ON(!wl->wow_enabled);
1626 * re-enable irq_work enqueuing, and call irq_work directly if
1627 * there is a pending work.
1629 spin_lock_irqsave(&wl->wl_lock, flags);
1630 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1631 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1632 run_irq_work = true;
1633 spin_unlock_irqrestore(&wl->wl_lock, flags);
1636 wl1271_debug(DEBUG_MAC80211,
1637 "run postponed irq_work directly");
1639 wlcore_enable_interrupts(wl);
1642 mutex_lock(&wl->mutex);
1643 wl12xx_for_each_wlvif(wl, wlvif) {
1644 wl1271_configure_resume(wl, wlvif);
1646 wl->wow_enabled = false;
1647 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback. Intentionally does nothing beyond logging:
 * firmware boot is deferred to add_interface because the MAC address
 * (only known once an interface is added) must be set before the
 * firmware is downloaded. See the comment below.
 */
1653 static int wl1271_op_start(struct ieee80211_hw *hw)
1655 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1658 * We have to delay the booting of the hardware because
1659 * we need to know the local MAC address before downloading and
1660 * initializing the firmware. The MAC address cannot be changed
1661 * after boot, and without the proper MAC address, the firmware
1662 * will not function properly.
1664 * The MAC address is first known when the corresponding interface
1665 * is added. That is where we will initialize the hardware.
/*
 * mac80211 stop callback: full driver shutdown.
 * Ordering is critical and documented inline -- interrupts off before
 * state goes OFF, state goes OFF before the work items are cancelled,
 * and the per-chip state reset happens under wl->mutex after power-off.
 * Safe to call when already OFF (returns early, rebalancing the
 * interrupt disable).
 */
1671 static void wl1271_op_stop(struct ieee80211_hw *hw)
1673 struct wl1271 *wl = hw->priv;
1676 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1679 * Interrupts must be disabled before setting the state to OFF.
1680 * Otherwise, the interrupt handler might be called and exit without
1681 * reading the interrupt status.
1683 wlcore_disable_interrupts(wl);
1684 mutex_lock(&wl->mutex);
1685 if (wl->state == WL1271_STATE_OFF) {
1686 mutex_unlock(&wl->mutex);
1689 * This will not necessarily enable interrupts as interrupts
1690 * may have been disabled when op_stop was called. It will,
1691 * however, balance the above call to disable_interrupts().
1693 wlcore_enable_interrupts(wl);
1698 * this must be before the cancel_work calls below, so that the work
1699 * functions don't perform further work.
1701 wl->state = WL1271_STATE_OFF;
1702 mutex_unlock(&wl->mutex);
/* cancel all deferred/periodic work outside the mutex */
1704 wl1271_flush_deferred_work(wl);
1705 cancel_delayed_work_sync(&wl->scan_complete_work);
1706 cancel_work_sync(&wl->netstack_work);
1707 cancel_work_sync(&wl->tx_work);
1708 cancel_delayed_work_sync(&wl->elp_work);
1709 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1710 cancel_delayed_work_sync(&wl->connection_loss_work);
1712 /* let's notify MAC80211 about the remaining pending TX frames */
1713 wl12xx_tx_reset(wl, true);
1714 mutex_lock(&wl->mutex);
1716 wl1271_power_off(wl);
/* reset chip-global state to post-probe defaults */
1718 wl->band = IEEE80211_BAND_2GHZ;
1721 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1722 wl->channel_type = NL80211_CHAN_NO_HT;
1723 wl->tx_blocks_available = 0;
1724 wl->tx_allocated_blocks = 0;
1725 wl->tx_results_count = 0;
1726 wl->tx_packets_count = 0;
1727 wl->time_offset = 0;
1728 wl->ap_fw_ps_map = 0;
1730 wl->sched_scanning = false;
1731 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1732 memset(wl->links_map, 0, sizeof(wl->links_map));
1733 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1734 wl->active_sta_count = 0;
1736 /* The system link is always allocated */
1737 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1740 * this is performed after the cancel_work calls and the associated
1741 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1742 * get executed before all these vars have been reset.
1746 wl->tx_blocks_freed = 0;
1748 for (i = 0; i < NUM_TX_QUEUES; i++) {
1749 wl->tx_pkts_freed[i] = 0;
1750 wl->tx_allocated_pkts[i] = 0;
1753 wl1271_debugfs_reset(wl);
/* fw_status_2 points into the fw_status_1 allocation; only one kfree */
1755 kfree(wl->fw_status_1);
1756 wl->fw_status_1 = NULL;
1757 wl->fw_status_2 = NULL;
1758 kfree(wl->tx_res_if);
1759 wl->tx_res_if = NULL;
1760 kfree(wl->target_mem_map);
1761 wl->target_mem_map = NULL;
1763 mutex_unlock(&wl->mutex);
/*
 * Reserve the first free slot in the rate-policies bitmap and return its
 * index through *idx. Fails (in elided lines) when all
 * WL12XX_MAX_RATE_POLICIES slots are taken. Caller must hold wl->mutex
 * -- presumably; confirm against full source.
 */
1766 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
1768 u8 policy = find_first_zero_bit(wl->rate_policies_map,
1769 WL12XX_MAX_RATE_POLICIES);
1770 if (policy >= WL12XX_MAX_RATE_POLICIES)
1773 __set_bit(policy, wl->rate_policies_map);
/*
 * Release a rate-policy slot previously reserved by
 * wl12xx_allocate_rate_policy(), then poison *idx with the out-of-range
 * sentinel so double-free is caught by the WARN_ON.
 */
1778 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
1780 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
1783 __clear_bit(*idx, wl->rate_policies_map);
1784 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Map a vif's bss type to a firmware role type. The elided conditions
 * inside each case distinguish P2P (GO/client) from plain AP/STA
 * -- confirm against the full source. Returns
 * WL12XX_INVALID_ROLE_TYPE for an unknown bss type.
 */
1787 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1789 switch (wlvif->bss_type) {
1790 case BSS_TYPE_AP_BSS:
1792 return WL1271_ROLE_P2P_GO;
1794 return WL1271_ROLE_AP;
1796 case BSS_TYPE_STA_BSS:
1798 return WL1271_ROLE_P2P_CL;
1800 return WL1271_ROLE_STA;
1803 return WL1271_ROLE_IBSS;
1806 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
1808 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the per-vif driver state for a newly added interface:
 * derive bss_type from the (p2p-aware) mac80211 vif type, invalidate all
 * role/link ids, allocate rate-policy slots (STA/IBSS vs AP sets differ),
 * copy the chip-global channel/band/power settings into the vif, and set
 * up the rx-streaming work items and timer.
 */
1811 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1813 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
1816 /* clear everything but the persistent data */
1817 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
1819 switch (ieee80211_vif_type_p2p(vif)) {
1820 case NL80211_IFTYPE_P2P_CLIENT:
/* fallthrough into STATION handling (p2p flag set in elided line) */
1823 case NL80211_IFTYPE_STATION:
1824 wlvif->bss_type = BSS_TYPE_STA_BSS;
1826 case NL80211_IFTYPE_ADHOC:
1827 wlvif->bss_type = BSS_TYPE_IBSS;
1829 case NL80211_IFTYPE_P2P_GO:
1832 case NL80211_IFTYPE_AP:
1833 wlvif->bss_type = BSS_TYPE_AP_BSS;
1836 wlvif->bss_type = MAX_BSS_TYPE;
1840 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
1841 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
1842 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
1844 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
1845 wlvif->bss_type == BSS_TYPE_IBSS) {
1846 /* init sta/ibss data */
1847 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
1848 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
1849 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
1850 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
1851 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
1852 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
1853 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* else: AP-mode initialization */
1856 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
1857 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
1858 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
1859 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
1860 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
1861 wl12xx_allocate_rate_policy(wl,
1862 &wlvif->ap.ucast_rate_idx[i]);
1863 wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES;
1865 * TODO: check if basic_rate shouldn't be
1866 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
1867 * instead (the same thing for STA above).
1869 wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES;
1870 /* TODO: this seems to be used only for STA, check it */
1871 wlvif->rate_set = CONF_TX_AP_ENABLED_RATES;
1874 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
1875 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
1876 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
1879 * mac80211 configures some values globally, while we treat them
1880 * per-interface. thus, on init, we have to copy them from wl
1882 wlvif->band = wl->band;
1883 wlvif->channel = wl->channel;
1884 wlvif->power_level = wl->power_level;
1885 wlvif->channel_type = wl->channel_type;
1887 INIT_WORK(&wlvif->rx_streaming_enable_work,
1888 wl1271_rx_streaming_enable_work);
1889 INIT_WORK(&wlvif->rx_streaming_disable_work,
1890 wl1271_rx_streaming_disable_work);
1891 INIT_LIST_HEAD(&wlvif->list);
1893 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
1894 (unsigned long) wlvif);
/*
 * Boot the firmware, retrying up to WL1271_BOOT_RETRIES times.
 * Each attempt: wake the chip, run the chip-specific boot op, then
 * wl1271_hw_init(). On failure the sequence unwinds (drop mutex, drain
 * IRQ/deferred work, power off) before the next attempt -- see the
 * inline rationale for the mid-function mutex drop. On success, records
 * fw version info in wiphy, disables 5GHz channels when 11a is not
 * supported by the NVS, and moves wl->state to ON.
 * Returns true iff the firmware booted.
 */
1898 static bool wl12xx_init_fw(struct wl1271 *wl)
1900 int retries = WL1271_BOOT_RETRIES;
1901 bool booted = false;
1902 struct wiphy *wiphy = wl->hw->wiphy;
1907 ret = wl12xx_chip_wakeup(wl, false);
1911 ret = wl->ops->boot(wl);
1915 ret = wl1271_hw_init(wl);
1923 mutex_unlock(&wl->mutex);
1924 /* Unlocking the mutex in the middle of handling is
1925 inherently unsafe. In this case we deem it safe to do,
1926 because we need to let any possibly pending IRQ out of
1927 the system (and while we are WL1271_STATE_OFF the IRQ
1928 work function will not do anything.) Also, any other
1929 possible concurrent operations will fail due to the
1930 current state, hence the wl1271 struct should be safe. */
1931 wlcore_disable_interrupts(wl);
1932 wl1271_flush_deferred_work(wl);
1933 cancel_work_sync(&wl->netstack_work);
1934 mutex_lock(&wl->mutex);
1936 wl1271_power_off(wl);
1940 wl1271_error("firmware boot failed despite %d retries",
1941 WL1271_BOOT_RETRIES);
1945 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
1947 /* update hw/fw version info in wiphy struct */
1948 wiphy->hw_version = wl->chip.id;
1949 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1950 sizeof(wiphy->fw_version));
1953 * Now we know if 11a is supported (info from the NVS), so disable
1954 * 11a channels if not supported
1956 if (!wl->enable_11a)
1957 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
1959 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
1960 wl->enable_11a ? "" : "not ");
1962 wl->state = WL1271_STATE_ON;
/* True when the vif's device role has a valid (started) link id. */
1967 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
1969 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
1973 * Check whether a fw switch (i.e. moving from one loaded
1974 * fw to another) is needed. This function is also responsible
1975 * for updating wl->last_vif_count, so it must be called before
1976 * loading a non-plt fw (so the correct fw (single-role/multi-role)
/*
 * Returns whether a single-role <-> multi-role firmware switch is
 * required for the new vif count. Never requests a switch while a vif
 * change is already in progress or while the device is OFF (the right
 * fw will simply be chosen at next boot).
 */
1979 static bool wl12xx_need_fw_change(struct wl1271 *wl,
1980 struct vif_counter_data vif_counter_data,
1983 enum wl12xx_fw_type current_fw = wl->fw_type;
1984 u8 vif_count = vif_counter_data.counter;
1986 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
1989 /* increase the vif count if this is a new vif */
1990 if (add && !vif_counter_data.cur_vif_running)
1993 wl->last_vif_count = vif_count;
1995 /* no need for fw change if the device is OFF */
1996 if (wl->state == WL1271_STATE_OFF)
/* >1 vif needs the multi-role fw; <=1 vif needs the single-role fw */
1999 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2001 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2008 * Enter "forced psm". Make sure the sta is in psm against the ap,
2009 * to make the fw switch a bit more disconnection-persistent.
/* Put every STA vif into power-save before a fw-switch recovery. */
2011 static void wl12xx_force_active_psm(struct wl1271 *wl)
2013 struct wl12xx_vif *wlvif;
2015 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2016 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * mac80211 add_interface callback. Initializes the vif's driver data,
 * triggers a recovery-based fw switch if the new vif count requires a
 * different (single/multi-role) firmware, boots the firmware on first
 * interface (MAC address must be known before boot -- see op_start),
 * enables the fw role(s) -- a device role plus the real role for
 * STA/IBSS -- and finally runs vif-specific init and links the vif into
 * wl->wlvif_list.
 */
2020 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2021 struct ieee80211_vif *vif)
2023 struct wl1271 *wl = hw->priv;
2024 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2025 struct vif_counter_data vif_count;
2028 bool booted = false;
2030 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2031 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2033 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2034 ieee80211_vif_type_p2p(vif), vif->addr);
2036 wl12xx_get_vif_count(hw, vif, &vif_count);
2038 mutex_lock(&wl->mutex);
2039 ret = wl1271_ps_elp_wakeup(wl);
2044 * in some very corner case HW recovery scenarios its possible to
2045 * get here before __wl1271_op_remove_interface is complete, so
2046 * opt out if that is the case.
2048 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2049 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2055 ret = wl12xx_init_vif_data(wl, vif);
2060 role_type = wl12xx_get_role_type(wl, wlvif);
2061 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
/* fw switch needed: run recovery synchronously outside the mutex */
2066 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2067 wl12xx_force_active_psm(wl);
2068 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2069 mutex_unlock(&wl->mutex);
2070 wl1271_recovery_work(&wl->recovery_work);
2075 * TODO: after the nvs issue will be solved, move this block
2076 * to start(), and make sure here the driver is ON.
2078 if (wl->state == WL1271_STATE_OFF) {
2080 * we still need this in order to configure the fw
2081 * while uploading the nvs
2083 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2085 booted = wl12xx_init_fw(wl);
2092 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2093 wlvif->bss_type == BSS_TYPE_IBSS) {
2095 * The device role is a special role used for
2096 * rx and tx frames prior to association (as
2097 * the STA role can get packets only from
2098 * its associated bssid)
2100 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2102 &wlvif->dev_role_id);
2107 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2108 role_type, &wlvif->role_id);
2112 ret = wl1271_init_vif_specific(wl, vif);
2116 list_add(&wlvif->list, &wl->wlvif_list);
2117 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2119 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2124 wl1271_ps_elp_sleep(wl);
2126 mutex_unlock(&wl->mutex);
/*
 * Tear down one vif. Called with wl->mutex held; temporarily drops it
 * around the rx-streaming timer/work cancellation at the end.
 * Steps: abort any scan owned by this vif, disable the fw roles (unless
 * a HW recovery is in progress, in which case the fw is gone anyway),
 * invalidate hlids and free rate-policy slots, reset per-vif TX state,
 * and unlink the vif from wl->wlvif_list.
 * @reset_tx_queues: forwarded (presumably to TX reset in elided lines)
 * -- confirm against full source.
 */
2131 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2132 struct ieee80211_vif *vif,
2133 bool reset_tx_queues)
2135 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2138 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2140 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2143 /* because of hardware recovery, we may get here twice */
2144 if (wl->state != WL1271_STATE_ON)
2147 wl1271_info("down");
2149 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2150 wl->scan_vif == vif) {
2152 * Rearm the tx watchdog just before idling scan. This
2153 * prevents just-finished scans from triggering the watchdog
2155 wl12xx_rearm_tx_watchdog_locked(wl);
2157 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2158 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2159 wl->scan_vif = NULL;
2160 wl->scan.req = NULL;
/* tell mac80211 the scan was aborted */
2161 ieee80211_scan_completed(wl->hw, true);
2164 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2165 /* disable active roles */
2166 ret = wl1271_ps_elp_wakeup(wl);
2170 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2171 wlvif->bss_type == BSS_TYPE_IBSS) {
2172 if (wl12xx_dev_role_started(wlvif))
2173 wl12xx_stop_dev(wl, wlvif);
2175 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2180 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2184 wl1271_ps_elp_sleep(wl);
2187 /* clear all hlids (except system_hlid) */
2188 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2190 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2191 wlvif->bss_type == BSS_TYPE_IBSS) {
2192 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2193 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2194 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2195 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
/* else: AP-mode cleanup */
2197 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2198 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2199 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2200 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2201 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2202 wl12xx_free_rate_policy(wl,
2203 &wlvif->ap.ucast_rate_idx[i]);
2204 wl1271_free_ap_keys(wl, wlvif);
2207 dev_kfree_skb(wlvif->probereq);
2208 wlvif->probereq = NULL;
2209 wl12xx_tx_reset_wlvif(wl, wlvif);
2210 if (wl->last_wlvif == wlvif)
2211 wl->last_wlvif = NULL;
2212 list_del(&wlvif->list);
2213 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2214 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2215 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2217 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
/*
 * drop the mutex so the timer/work handlers (which take it) can
 * finish before we cancel them synchronously
 */
2222 mutex_unlock(&wl->mutex);
2224 del_timer_sync(&wlvif->rx_streaming_timer);
2225 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2226 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2228 mutex_lock(&wl->mutex);
/*
 * mac80211 remove_interface callback. Verifies the vif is still tracked
 * in wl->wlvif_list (it may have vanished during HW recovery), performs
 * the actual teardown via __wl1271_op_remove_interface(), and, if the
 * reduced vif count calls for a single-role firmware, queues a fw-switch
 * recovery instead of cancelling the recovery work.
 */
2231 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2232 struct ieee80211_vif *vif)
2234 struct wl1271 *wl = hw->priv;
2235 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2236 struct wl12xx_vif *iter;
2237 struct vif_counter_data vif_count;
2238 bool cancel_recovery = true;
2240 wl12xx_get_vif_count(hw, vif, &vif_count);
2241 mutex_lock(&wl->mutex);
2243 if (wl->state == WL1271_STATE_OFF ||
2244 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2248 * wl->vif can be null here if someone shuts down the interface
2249 * just when hardware recovery has been started.
2251 wl12xx_for_each_wlvif(wl, iter) {
/* teardown only when the vif was found in the list */
2255 __wl1271_op_remove_interface(wl, vif, true);
2258 WARN_ON(iter != wlvif);
2259 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2260 wl12xx_force_active_psm(wl);
2261 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2262 wl12xx_queue_recovery_work(wl);
2263 cancel_recovery = false;
2266 mutex_unlock(&wl->mutex);
2267 if (cancel_recovery)
2268 cancel_work_sync(&wl->recovery_work);
/*
 * mac80211 change_interface callback: implemented as remove + re-add
 * with the new type. VIF_CHANGE_IN_PROGRESS suppresses the fw-switch
 * check in wl12xx_need_fw_change() so the remove/add pair doesn't
 * trigger a spurious firmware recovery.
 */
2271 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2272 struct ieee80211_vif *vif,
2273 enum nl80211_iftype new_type, bool p2p)
2275 struct wl1271 *wl = hw->priv;
2278 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2279 wl1271_op_remove_interface(hw, vif);
2281 vif->type = new_type;
2283 ret = wl1271_op_add_interface(hw, vif);
2285 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue the fw JOIN (role_start) for a STA or IBSS vif and re-establish
 * the keep-alive state the JOIN clears. See the inline warning: JOIN
 * while associated wipes WPA keys; it's only expected during roaming.
 * After a successful join, keep-alive mode, AID, the null-data KLV
 * template and its validity flag must be restored in that order.
 */
2289 static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2293 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2296 * One of the side effects of the JOIN command is that is clears
2297 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2298 * to a WPA/WPA2 access point will therefore kill the data-path.
2299 * Currently the only valid scenario for JOIN during association
2300 * is on roaming, in which case we will also be given new keys.
2301 * Keep the below message for now, unless it starts bothering
2302 * users who really like to roam a lot :)
2304 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2305 wl1271_info("JOIN while associated.");
2307 /* clear encryption type */
2308 wlvif->encryption_type = KEY_NONE;
2311 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2314 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2316 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2320 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2324 * The join command disable the keep-alive mode, shut down its process,
2325 * and also clear the template config, so we need to reset it all after
2326 * the join. The acx_aid starts the keep-alive process, and the order
2327 * of the commands below is relevant.
2329 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2333 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2337 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2341 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2342 CMD_TEMPL_KLV_IDX_NULL_DATA,
2343 ACX_KEEP_ALIVE_TPL_VALID);
/*
 * Disconnect a STA vif: abort any in-progress channel switch (reporting
 * failure to mac80211), stop the STA role in fw, and reset the TX
 * security sequence counters for the next clean connection.
 */
2351 static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2355 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2356 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2358 wl12xx_cmd_stop_channel_switch(wl);
2359 ieee80211_chswitch_done(vif, false);
2362 /* to stop listening to a channel, we disconnect */
2363 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
2367 /* reset TX security counters on a clean disconnect */
2368 wlvif->tx_security_last_seq_lsb = 0;
2369 wlvif->tx_security_seq = 0;
/* Reset the vif's basic/tx rate sets to the mask for its current band. */
2375 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2377 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2378 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Transition a STA vif between idle and in-use.
 * Going idle: stop the device role (CROC), reprogram rate policies to
 * the minimum basic rate, invalidate the keep-alive template, and clear
 * IN_USE. Leaving idle: stop any sched scan (fw only supports it while
 * idle), start the device role, and set IN_USE.
 * No-op when the requested state matches the current one.
 */
2381 static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2385 bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2387 if (idle == cur_idle)
2391 /* no need to croc if we weren't busy (e.g. during boot) */
2392 if (wl12xx_dev_role_started(wlvif)) {
2393 ret = wl12xx_stop_dev(wl, wlvif);
2398 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2399 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2402 ret = wl1271_acx_keep_alive_config(
2403 wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
2404 ACX_KEEP_ALIVE_TPL_INVALID);
2407 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2409 /* The current firmware only supports sched_scan in idle */
2410 if (wl->sched_scanning) {
2411 wl1271_scan_sched_scan_stop(wl);
2412 ieee80211_sched_scan_stopped(wl->hw);
2415 ret = wl12xx_start_dev(wl, wlvif);
2418 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Apply a mac80211 config change to one vif.
 * Channel/band/HT change: flush pending TX, record the new settings,
 * then either reprogram AP rates (AP mode) or, for an unassociated STA,
 * reset band rates + rate policies and re-ROC onto the new channel
 * (skipped when idle, since CROC/ROC is managed by the idle path).
 * PS change (non-AP only): enter forced or auto powersave per
 * wl->conf.conn.forced_ps, or return to active mode.
 * TX power change: push the new level via ACX and cache it.
 */
2425 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2426 struct ieee80211_conf *conf, u32 changed)
2428 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2431 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2433 /* if the channel changes while joined, join again */
2434 if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
2435 ((wlvif->band != conf->channel->band) ||
2436 (wlvif->channel != channel) ||
2437 (wlvif->channel_type != conf->channel_type))) {
2438 /* send all pending packets */
2439 wl1271_tx_work_locked(wl);
2440 wlvif->band = conf->channel->band;
2441 wlvif->channel = channel;
2442 wlvif->channel_type = conf->channel_type;
2445 ret = wl1271_init_ap_rates(wl, wlvif);
2447 wl1271_error("AP rate policy change failed %d",
2451 * FIXME: the mac80211 should really provide a fixed
2452 * rate to use here. for now, just use the smallest
2453 * possible rate for the band as a fixed rate for
2454 * association frames and other control messages.
2456 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2457 wl1271_set_band_rate(wl, wlvif);
2460 wl1271_tx_min_rate_get(wl,
2461 wlvif->basic_rate_set);
2462 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2464 wl1271_warning("rate policy for channel "
2468 * change the ROC channel. do it only if we are
2469 * not idle. otherwise, CROC will be called
2472 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
2474 wl12xx_dev_role_started(wlvif) &&
2475 !(conf->flags & IEEE80211_CONF_IDLE)) {
2476 ret = wl12xx_stop_dev(wl, wlvif);
2480 ret = wl12xx_start_dev(wl, wlvif);
2487 if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
2489 if ((conf->flags & IEEE80211_CONF_PS) &&
2490 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
2491 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2496 if (wl->conf.conn.forced_ps) {
2497 ps_mode = STATION_POWER_SAVE_MODE;
2498 ps_mode_str = "forced";
2500 ps_mode = STATION_AUTO_PS_MODE;
2501 ps_mode_str = "auto";
2504 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
2506 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
2509 wl1271_warning("enter %s ps failed %d",
2512 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
2513 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2515 wl1271_debug(DEBUG_PSM, "auto ps disabled");
2517 ret = wl1271_ps_set_mode(wl, wlvif,
2518 STATION_ACTIVE_MODE);
2520 wl1271_warning("exit auto ps failed %d", ret);
2524 if (conf->power_level != wlvif->power_level) {
2525 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2529 wlvif->power_level = conf->power_level;
/*
 * mac80211 config callback. Records chip-global channel/band/power (even
 * while the device is OFF), flushes TX before going idle so deauth
 * frames actually hit the air, then -- if the device is ON -- wakes the
 * chip and applies the change to every vif via wl12xx_config_vif().
 */
2535 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2537 struct wl1271 *wl = hw->priv;
2538 struct wl12xx_vif *wlvif;
2539 struct ieee80211_conf *conf = &hw->conf;
2540 int channel, ret = 0;
2542 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2544 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
2547 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2549 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2553 * mac80211 will go to idle nearly immediately after transmitting some
2554 * frames, such as the deauth. To make sure those frames reach the air,
2555 * wait here until the TX queue is fully flushed.
2557 if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
2558 (conf->flags & IEEE80211_CONF_IDLE))
2559 wl1271_tx_flush(wl);
2561 mutex_lock(&wl->mutex);
2563 /* we support configuring the channel and band even while off */
2564 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2565 wl->band = conf->channel->band;
2566 wl->channel = channel;
2567 wl->channel_type = conf->channel_type;
2570 if (changed & IEEE80211_CONF_CHANGE_POWER)
2571 wl->power_level = conf->power_level;
2573 if (unlikely(wl->state == WL1271_STATE_OFF))
2576 ret = wl1271_ps_elp_wakeup(wl);
2580 /* configure each interface */
2581 wl12xx_for_each_wlvif(wl, wlvif) {
2582 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2588 wl1271_ps_elp_sleep(wl);
2591 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in prepare_multicast (atomic context)
 * and consumed in configure_filter. Holds up to
 * ACX_MC_ADDRESS_GROUP_MAX MAC addresses; other members (enabled,
 * mc_list_length) are in elided lines.
 */
2596 struct wl1271_filter_params {
2599 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 prepare_multicast callback (may run in atomic context, hence
 * GFP_ATOMIC). Snapshots the multicast address list into a freshly
 * allocated wl1271_filter_params and smuggles the pointer to
 * wl1271_op_configure_filter() through the u64 "multicast" cookie.
 * When the list exceeds the fw limit, filtering is disabled instead.
 * Ownership of the allocation passes to configure_filter.
 */
2602 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2603 struct netdev_hw_addr_list *mc_list)
2605 struct wl1271_filter_params *fp;
2606 struct netdev_hw_addr *ha;
2607 struct wl1271 *wl = hw->priv;
2609 if (unlikely(wl->state == WL1271_STATE_OFF))
2612 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2614 wl1271_error("Out of memory setting filters.");
2618 /* update multicast filtering parameters */
2619 fp->mc_list_length = 0;
2620 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2621 fp->enabled = false;
2624 netdev_hw_addr_list_for_each(ha, mc_list) {
2625 memcpy(fp->mc_list[fp->mc_list_length],
2626 ha->addr, ETH_ALEN);
2627 fp->mc_list_length++;
2631 return (u64)(unsigned long)fp;
/* RX filter flags this driver supports (macro continuation lines elided
 * in this excerpt). */
2634 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2637 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 configure_filter callback; consumes (and, per ownership
 * transfer, presumably frees in elided lines -- confirm) the
 * wl1271_filter_params cookie from prepare_multicast. Masks the request
 * down to WL1271_SUPPORTED_FILTERS, then programs the fw multicast group
 * address table per non-AP vif: disabled under FIF_ALLMULTI, otherwise
 * loaded with the snapshotted list. As the inline note says, the fw has
 * no general filter API; filtering follows the active roles/ROC state.
 */
2641 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2642 unsigned int changed,
2643 unsigned int *total, u64 multicast)
2645 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2646 struct wl1271 *wl = hw->priv;
2647 struct wl12xx_vif *wlvif;
2651 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2652 " total %x", changed, *total);
2654 mutex_lock(&wl->mutex);
2656 *total &= WL1271_SUPPORTED_FILTERS;
2657 changed &= WL1271_SUPPORTED_FILTERS;
2659 if (unlikely(wl->state == WL1271_STATE_OFF))
2662 ret = wl1271_ps_elp_wakeup(wl);
2666 wl12xx_for_each_wlvif(wl, wlvif) {
2667 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
2668 if (*total & FIF_ALLMULTI)
2669 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2673 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2676 fp->mc_list_length);
2683 * the fw doesn't provide an api to configure the filters. instead,
2684 * the filters configuration is based on the active roles / ROC
2689 wl1271_ps_elp_sleep(wl);
2692 mutex_unlock(&wl->mutex);
/*
 * Record a group/pairwise key set before the AP role is started, to be
 * replayed into the fw by wl1271_ap_init_hwenc() once the AP starts.
 * Rejects oversized keys, duplicate key ids, and a full table
 * (MAX_NUM_KEYS entries). The recorded copy is heap-allocated and owned
 * by wlvif->ap.recorded_keys until wl1271_free_ap_keys().
 */
2696 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2697 u8 id, u8 key_type, u8 key_size,
2698 const u8 *key, u8 hlid, u32 tx_seq_32,
2701 struct wl1271_ap_key *ap_key;
2704 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
2706 if (key_size > MAX_KEY_SIZE)
2710 * Find next free entry in ap_keys. Also check we are not replacing
2713 for (i = 0; i < MAX_NUM_KEYS; i++) {
2714 if (wlvif->ap.recorded_keys[i] == NULL)
2717 if (wlvif->ap.recorded_keys[i]->id == id) {
2718 wl1271_warning("trying to record key replacement");
2723 if (i == MAX_NUM_KEYS)
2726 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
2731 ap_key->key_type = key_type;
2732 ap_key->key_size = key_size;
2733 memcpy(ap_key->key, key, key_size);
2734 ap_key->hlid = hlid;
2735 ap_key->tx_seq_32 = tx_seq_32;
2736 ap_key->tx_seq_16 = tx_seq_16;
2738 wlvif->ap.recorded_keys[i] = ap_key;
/* Free all recorded AP keys and NULL the table slots (kfree(NULL) is a
 * no-op, so empty slots are fine). */
2742 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2746 for (i = 0; i < MAX_NUM_KEYS; i++) {
2747 kfree(wlvif->ap.recorded_keys[i]);
2748 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Replay the keys recorded by wl1271_record_ap_key() into the firmware
 * now that the AP role is started. Keys with no valid hlid are treated
 * as broadcast keys (bcast_hlid). If any WEP key was installed, the
 * default WEP key index is also programmed. All recorded keys are freed
 * on exit regardless of outcome.
 */
2752 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2755 struct wl1271_ap_key *key;
2756 bool wep_key_added = false;
2758 for (i = 0; i < MAX_NUM_KEYS; i++) {
2760 if (wlvif->ap.recorded_keys[i] == NULL)
2763 key = wlvif->ap.recorded_keys[i];
2765 if (hlid == WL12XX_INVALID_LINK_ID)
2766 hlid = wlvif->ap.bcast_hlid;
2768 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2769 key->id, key->key_type,
2770 key->key_size, key->key,
2771 hlid, key->tx_seq_32,
2776 if (key->key_type == KEY_WEP)
2777 wep_key_added = true;
2780 if (wep_key_added) {
2781 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
2782 wlvif->ap.bcast_hlid);
2788 wl1271_free_ap_keys(wl, wlvif);
/*
 * wl1271_set_key - common add/remove key path for both AP and STA roles.
 *
 * For an AP role the key is either recorded (if the AP has not started
 * yet) or sent with wl1271_cmd_set_ap_key().  For a STA role the key is
 * sent with wl1271_cmd_set_sta_key(), with several quirks handled here:
 * TX-only (zero-address) keys are unsupported, unicast key removal is
 * silently ignored, and removal is skipped once the hlid is gone.
 */
2792 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2793 u16 action, u8 id, u8 key_type,
2794 u8 key_size, const u8 *key, u32 tx_seq_32,
2795 u16 tx_seq_16, struct ieee80211_sta *sta)
2798 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2801 * A role set to GEM cipher requires different Tx settings (namely
2802 * spare blocks). Note when we are in this mode so the HW can adjust.
2804 if (key_type == KEY_GEM) {
2805 if (action == KEY_ADD_OR_REPLACE)
2806 wlvif->is_gem = true;
2807 else if (action == KEY_REMOVE)
2808 wlvif->is_gem = false;
/* AP-role path: pairwise keys go to the station's hlid,
 * group keys to the broadcast hlid. */
2812 struct wl1271_station *wl_sta;
2816 wl_sta = (struct wl1271_station *)sta->drv_priv;
2817 hlid = wl_sta->hlid;
2819 hlid = wlvif->ap.bcast_hlid;
2822 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
2824 * We do not support removing keys after AP shutdown.
2825 * Pretend we do to make mac80211 happy.
2827 if (action != KEY_ADD_OR_REPLACE)
2830 ret = wl1271_record_ap_key(wl, wlvif, id,
2832 key, hlid, tx_seq_32,
2835 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
2836 id, key_type, key_size,
2837 key, hlid, tx_seq_32,
/* STA path: group keys are addressed to the broadcast address. */
2845 static const u8 bcast_addr[ETH_ALEN] = {
2846 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2849 addr = sta ? sta->addr : bcast_addr;
2851 if (is_zero_ether_addr(addr)) {
2852 /* We dont support TX only encryption */
2856 /* The wl1271 does not allow to remove unicast keys - they
2857 will be cleared automatically on next CMD_JOIN. Ignore the
2858 request silently, as we dont want the mac80211 to emit
2859 an error message. */
2860 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
2863 /* don't remove key if hlid was already deleted */
2864 if (action == KEY_REMOVE &&
2865 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
2868 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
2869 id, key_type, key_size,
2870 key, addr, tx_seq_32,
2875 /* the default WEP key needs to be configured at least once */
2876 if (key_type == KEY_WEP) {
2877 ret = wl12xx_cmd_set_default_wep_key(wl,
/*
 * wl1271_op_set_key - mac80211 .set_key handler.
 *
 * Maps the mac80211 cipher suite to the firmware key type, captures the
 * current TX security sequence counters for TKIP/CCMP/GEM, wakes the
 * chip, and dispatches to wl1271_set_key() for SET_KEY (add/replace) or
 * DISABLE_KEY (remove).  Also rebuilds the ARP response template when a
 * STA's unicast/common encryption type changes.
 */
2888 static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2889 struct ieee80211_vif *vif,
2890 struct ieee80211_sta *sta,
2891 struct ieee80211_key_conf *key_conf)
2893 struct wl1271 *wl = hw->priv;
2894 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2900 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
2902 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
2903 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
2904 key_conf->cipher, key_conf->keyidx,
2905 key_conf->keylen, key_conf->flags);
2906 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
2908 mutex_lock(&wl->mutex);
2910 if (unlikely(wl->state == WL1271_STATE_OFF)) {
2915 ret = wl1271_ps_elp_wakeup(wl);
/* Translate cipher suite -> firmware key type + TX sequence state. */
2919 switch (key_conf->cipher) {
2920 case WLAN_CIPHER_SUITE_WEP40:
2921 case WLAN_CIPHER_SUITE_WEP104:
2924 key_conf->hw_key_idx = key_conf->keyidx;
2926 case WLAN_CIPHER_SUITE_TKIP:
2927 key_type = KEY_TKIP;
2929 key_conf->hw_key_idx = key_conf->keyidx;
2930 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2931 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2933 case WLAN_CIPHER_SUITE_CCMP:
2936 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2937 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2938 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2940 case WL1271_CIPHER_SUITE_GEM:
2942 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2943 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2946 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
2954 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2955 key_conf->keyidx, key_type,
2956 key_conf->keylen, key_conf->key,
2957 tx_seq_32, tx_seq_16, sta);
2959 wl1271_error("Could not add or replace key");
2964 * reconfiguring arp response if the unicast (or common)
2965 * encryption key type was changed
2967 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
2968 (sta || key_type == KEY_WEP) &&
2969 wlvif->encryption_type != key_type) {
2970 wlvif->encryption_type = key_type;
2971 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
2973 wl1271_warning("build arp rsp failed: %d", ret);
2980 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
2981 key_conf->keyidx, key_type,
2982 key_conf->keylen, key_conf->key,
2985 wl1271_error("Could not remove key");
2991 wl1271_error("Unsupported key cmd 0x%x", cmd);
2997 wl1271_ps_elp_sleep(wl);
3000 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_hw_scan - mac80211 .hw_scan handler.
 *
 * Uses the first requested SSID (if any), refuses to scan while any
 * role holds a ROC (remain-on-channel), and hands off to wl1271_scan().
 * If the chip is off, -EBUSY cannot be returned because cfg80211 would
 * then wait for a scan-completed notification that will never come.
 */
3005 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3006 struct ieee80211_vif *vif,
3007 struct cfg80211_scan_request *req)
3009 struct wl1271 *wl = hw->priv;
3014 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3017 ssid = req->ssids[0].ssid;
3018 len = req->ssids[0].ssid_len;
3021 mutex_lock(&wl->mutex);
3023 if (wl->state == WL1271_STATE_OFF) {
3025 * We cannot return -EBUSY here because cfg80211 will expect
3026 * a call to ieee80211_scan_completed if we do - in this case
3027 * there won't be any call.
3033 ret = wl1271_ps_elp_wakeup(wl);
3037 /* fail if there is any role in ROC */
3038 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3039 /* don't allow scanning right now */
3044 ret = wl1271_scan(hw->priv, vif, ssid, len, req);
3046 wl1271_ps_elp_sleep(wl);
3048 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_cancel_hw_scan - mac80211 .cancel_hw_scan handler.
 *
 * Stops an in-progress scan in firmware, rearms the TX watchdog, resets
 * the driver scan state, and reports the scan as aborted to mac80211.
 * The scan-complete work is cancelled outside the mutex.
 */
3053 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3054 struct ieee80211_vif *vif)
3056 struct wl1271 *wl = hw->priv;
3059 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3061 mutex_lock(&wl->mutex);
3063 if (wl->state == WL1271_STATE_OFF)
3066 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3069 ret = wl1271_ps_elp_wakeup(wl);
3073 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3074 ret = wl1271_scan_stop(wl);
3080 * Rearm the tx watchdog just before idling scan. This
3081 * prevents just-finished scans from triggering the watchdog
3083 wl12xx_rearm_tx_watchdog_locked(wl);
3085 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3086 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3087 wl->scan_vif = NULL;
3088 wl->scan.req = NULL;
/* 'true' = scan was aborted */
3089 ieee80211_scan_completed(wl->hw, true);
3092 wl1271_ps_elp_sleep(wl);
3094 mutex_unlock(&wl->mutex);
3096 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * wl1271_op_sched_scan_start - mac80211 .sched_scan_start handler.
 *
 * Configures and starts a firmware scheduled (periodic) scan and marks
 * wl->sched_scanning on success.
 */
3099 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3100 struct ieee80211_vif *vif,
3101 struct cfg80211_sched_scan_request *req,
3102 struct ieee80211_sched_scan_ies *ies)
3104 struct wl1271 *wl = hw->priv;
3105 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3108 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3110 mutex_lock(&wl->mutex);
3112 if (wl->state == WL1271_STATE_OFF) {
3117 ret = wl1271_ps_elp_wakeup(wl);
3121 ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
3125 ret = wl1271_scan_sched_scan_start(wl, wlvif);
3129 wl->sched_scanning = true;
3132 wl1271_ps_elp_sleep(wl);
3134 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_sched_scan_stop - mac80211 .sched_scan_stop handler.
 * Wakes the chip and stops the firmware scheduled scan.
 */
3138 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3139 struct ieee80211_vif *vif)
3141 struct wl1271 *wl = hw->priv;
3144 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3146 mutex_lock(&wl->mutex);
3148 if (wl->state == WL1271_STATE_OFF)
3151 ret = wl1271_ps_elp_wakeup(wl);
3155 wl1271_scan_sched_scan_stop(wl);
3157 wl1271_ps_elp_sleep(wl);
3159 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_set_frag_threshold - mac80211 .set_frag_threshold handler.
 * Pushes the fragmentation threshold to firmware via ACX.
 */
3162 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3164 struct wl1271 *wl = hw->priv;
3167 mutex_lock(&wl->mutex);
3169 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3174 ret = wl1271_ps_elp_wakeup(wl);
3178 ret = wl1271_acx_frag_threshold(wl, value);
3180 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3182 wl1271_ps_elp_sleep(wl);
3185 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_set_rts_threshold - mac80211 .set_rts_threshold handler.
 * Applies the RTS threshold to every active vif via ACX.
 */
3190 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3192 struct wl1271 *wl = hw->priv;
3193 struct wl12xx_vif *wlvif;
3196 mutex_lock(&wl->mutex);
3198 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3203 ret = wl1271_ps_elp_wakeup(wl);
3207 wl12xx_for_each_wlvif(wl, wlvif) {
3208 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3210 wl1271_warning("set rts threshold failed: %d", ret);
3212 wl1271_ps_elp_sleep(wl);
3215 mutex_unlock(&wl->mutex);
/*
 * wl1271_ssid_set - extract the SSID IE from a frame and cache it.
 *
 * Locates the SSID element starting at @offset into @skb and copies the
 * SSID (and its length) into the vif data; errors out if the IE is
 * missing or the SSID exceeds IEEE80211_MAX_SSID_LEN.
 */
3220 static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
3223 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3225 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
3229 wl1271_error("No SSID in IEs!");
3234 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
3235 wl1271_error("SSID is too long!");
3239 wlvif->ssid_len = ssid_len;
/* skip the 2-byte IE header (id + length) */
3240 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * wl12xx_remove_ie - delete one information element from an skb.
 *
 * Finds the IE with id @eid starting at @ieoffset, shifts the remaining
 * data over it with memmove(), and trims the skb by the IE's length.
 */
3244 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3247 const u8 *next, *end = skb->data + skb->len;
3248 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3249 skb->len - ieoffset);
3254 memmove(ie, next, end - next);
3255 skb_trim(skb, skb->len - len);
/*
 * wl12xx_remove_vendor_ie - delete one vendor-specific IE from an skb.
 * Same approach as wl12xx_remove_ie(), but matched by OUI + OUI type.
 */
3258 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3259 unsigned int oui, u8 oui_type,
3263 const u8 *next, *end = skb->data + skb->len;
3264 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3265 skb->data + ieoffset,
3266 skb->len - ieoffset);
3271 memmove(ie, next, end - next);
3272 skb_trim(skb, skb->len - len);
/*
 * wl1271_ap_set_probe_resp_tmpl - install the AP probe response template.
 *
 * Builds the probe response from mac80211 (ieee80211_proberesp_get) and
 * uploads it as CMD_TEMPL_AP_PROBE_RESPONSE; on success marks the vif
 * with WLVIF_FLAG_AP_PROBE_RESP_SET so the beacon-derived fallback is
 * skipped.
 */
3275 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3276 struct ieee80211_vif *vif)
3278 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3279 struct sk_buff *skb;
3282 skb = ieee80211_proberesp_get(wl->hw, vif);
3286 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3287 CMD_TEMPL_AP_PROBE_RESPONSE,
3296 wl1271_debug(DEBUG_AP, "probe response updated");
3297 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * wl1271_ap_set_probe_resp_tmpl_legacy - probe response from beacon data.
 *
 * Fallback used when no explicit probe response template exists: takes
 * beacon-derived probe response data and, if the vif has no cached SSID,
 * splices the SSID from bss_conf into the template (replacing the SSID
 * IE found in the data) before uploading it.  Fails if the resulting
 * template would exceed WL1271_CMD_TEMPL_MAX_SIZE.
 */
3303 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3304 struct ieee80211_vif *vif,
3306 size_t probe_rsp_len,
3309 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3310 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3311 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3312 int ssid_ie_offset, ie_offset, templ_len;
3315 /* no need to change probe response if the SSID is set correctly */
3316 if (wlvif->ssid_len > 0)
3317 return wl1271_cmd_template_set(wl, wlvif->role_id,
3318 CMD_TEMPL_AP_PROBE_RESPONSE,
3323 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3324 wl1271_error("probe_rsp template too big");
3328 /* start searching from IE offset */
3329 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3331 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3332 probe_rsp_len - ie_offset);
3334 wl1271_error("No SSID in beacon!");
3338 ssid_ie_offset = ptr - probe_rsp_data;
/* advance past the original SSID IE (header + payload) */
3339 ptr += (ptr[1] + 2);
3341 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3343 /* insert SSID from bss_conf */
3344 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3345 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3346 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3347 bss_conf->ssid, bss_conf->ssid_len);
3348 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3350 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3351 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3352 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3354 return wl1271_cmd_template_set(wl, wlvif->role_id,
3355 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * wl1271_bss_erp_info_changed - apply ERP-related BSS changes.
 *
 * Handles slot time (short/long), preamble (short/long) and CTS
 * protection updates from mac80211's bss_conf via the corresponding
 * ACX commands.
 */
3361 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3362 struct ieee80211_vif *vif,
3363 struct ieee80211_bss_conf *bss_conf,
3366 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3369 if (changed & BSS_CHANGED_ERP_SLOT) {
3370 if (bss_conf->use_short_slot)
3371 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3373 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3375 wl1271_warning("Set slot time failed %d", ret);
3380 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3381 if (bss_conf->use_short_preamble)
3382 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3384 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3387 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3388 if (bss_conf->use_cts_prot)
3389 ret = wl1271_acx_cts_protect(wl, wlvif,
3392 ret = wl1271_acx_cts_protect(wl, wlvif,
3393 CTSPROTECT_DISABLE);
3395 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * wlcore_set_beacon_template - upload beacon (and derived probe resp).
 *
 * Fetches the current beacon from mac80211, caches the SSID from it,
 * and uploads it as the beacon template.  Unless usermode already set
 * an explicit probe response template, the beacon is then converted
 * into a probe response: the TIM and P2P IEs are stripped, the frame
 * control is rewritten to PROBE_RESP, and the result is uploaded
 * (via the legacy SSID-splicing path for AP mode).  The beacon skb is
 * freed on every visible exit path.
 */
3404 static int wlcore_set_beacon_template(struct wl1271 *wl,
3405 struct ieee80211_vif *vif,
3408 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3409 struct ieee80211_hdr *hdr;
3412 int ieoffset = offsetof(struct ieee80211_mgmt,
3414 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3422 wl1271_debug(DEBUG_MASTER, "beacon updated");
3424 ret = wl1271_ssid_set(vif, beacon, ieoffset);
3426 dev_kfree_skb(beacon);
3429 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3430 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3432 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3437 dev_kfree_skb(beacon);
3442 * In case we already have a probe-resp beacon set explicitly
3443 * by usermode, don't use the beacon data.
3445 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3448 /* remove TIM ie from probe response */
3449 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3452 * remove p2p ie from probe response.
3453 * the fw reponds to probe requests that don't include
3454 * the p2p ie. probe requests with p2p ie will be passed,
3455 * and will be responded by the supplicant (the spec
3456 * forbids including the p2p ie when responding to probe
3457 * requests that didn't include it).
3459 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3460 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* turn the beacon frame into a probe response frame */
3462 hdr = (struct ieee80211_hdr *) beacon->data;
3463 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3464 IEEE80211_STYPE_PROBE_RESP);
3466 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3471 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3472 CMD_TEMPL_PROBE_RESPONSE,
3477 dev_kfree_skb(beacon);
/*
 * wl1271_bss_beacon_info_changed - handle beacon-related BSS changes.
 *
 * Updates the cached beacon interval, refreshes the AP probe response
 * template when mac80211 signals BSS_CHANGED_AP_PROBE_RESP, and
 * re-uploads the beacon template on BSS_CHANGED_BEACON.
 */
3485 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3486 struct ieee80211_vif *vif,
3487 struct ieee80211_bss_conf *bss_conf,
3490 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3491 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3494 if ((changed & BSS_CHANGED_BEACON_INT)) {
3495 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3496 bss_conf->beacon_int);
3498 wlvif->beacon_int = bss_conf->beacon_int;
3501 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3502 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3504 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3507 if ((changed & BSS_CHANGED_BEACON)) {
3508 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3515 wl1271_error("beacon info change failed: %d", ret);
/* AP mode changes */
/*
 * wl1271_bss_info_changed_ap - apply BSS changes for an AP-role vif.
 *
 * Handles basic-rate updates (rate policy + templates), beacon updates,
 * starting/stopping the firmware AP role on BSS_CHANGED_BEACON_ENABLED
 * (programming recorded keys via wl1271_ap_init_hwenc() on start),
 * ERP changes, and HT operation-mode updates.
 */
3519 /* AP mode changes */
3520 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3521 struct ieee80211_vif *vif,
3522 struct ieee80211_bss_conf *bss_conf,
3525 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3528 if ((changed & BSS_CHANGED_BASIC_RATES)) {
3529 u32 rates = bss_conf->basic_rates;
3531 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3533 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3534 wlvif->basic_rate_set);
3536 ret = wl1271_init_ap_rates(wl, wlvif);
3538 wl1271_error("AP rate policy change failed %d", ret);
3542 ret = wl1271_ap_init_templates(wl, vif);
3546 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3550 ret = wlcore_set_beacon_template(wl, vif, true);
3555 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3559 if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
3560 if (bss_conf->enable_beacon) {
3561 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3562 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3566 ret = wl1271_ap_init_hwenc(wl, wlvif);
3570 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3571 wl1271_debug(DEBUG_AP, "started AP");
3574 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3575 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3579 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3580 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3582 wl1271_debug(DEBUG_AP, "stopped AP");
3587 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3591 /* Handle HT information change */
3592 if ((changed & BSS_CHANGED_HT) &&
3593 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3594 ret = wl1271_acx_set_ht_information(wl, wlvif,
3595 bss_conf->ht_operation_mode);
3597 wl1271_warning("Set ht information failed %d", ret);
/* STA/IBSS mode changes */
/*
 * wl1271_bss_info_changed_sta - apply BSS changes for a STA/IBSS vif.
 *
 * The STA-side counterpart of wl1271_bss_info_changed_ap().  Covers,
 * in order: IBSS join/leave flags, idle handling, CQM RSSI triggers,
 * BSSID (null-data templates), AP rate/HT capture, association and
 * disassociation (rate policies, ps-poll and probe-request templates,
 * connection monitoring, ROC/CROC bookkeeping), IBSS rates, the join
 * command itself, HT capabilities/information, and ARP filtering.
 * NOTE(review): this listing is heavily elided — many intermediate
 * lines (braces, gotos, assignments) are not visible in this excerpt.
 */
3606 /* STA/IBSS mode changes */
3607 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3608 struct ieee80211_vif *vif,
3609 struct ieee80211_bss_conf *bss_conf,
3612 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3613 bool do_join = false, set_assoc = false;
3614 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
3615 bool ibss_joined = false;
3616 u32 sta_rate_set = 0;
3618 struct ieee80211_sta *sta;
3619 bool sta_exists = false;
3620 struct ieee80211_sta_ht_cap sta_ht_cap;
3623 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
/* IBSS join/leave tracking */
3629 if (changed & BSS_CHANGED_IBSS) {
3630 if (bss_conf->ibss_joined) {
3631 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
3634 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
3636 wl1271_unjoin(wl, wlvif);
3640 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
3643 /* Need to update the SSID (for filtering etc) */
3644 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
3647 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
3648 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3649 bss_conf->enable_beacon ? "enabled" : "disabled");
3654 if (changed & BSS_CHANGED_IDLE && !is_ibss) {
3655 ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
3657 wl1271_warning("idle mode change failed %d", ret);
/* CQM: RSSI trigger configuration */
3660 if ((changed & BSS_CHANGED_CQM)) {
3661 bool enable = false;
3662 if (bss_conf->cqm_rssi_thold)
3664 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
3665 bss_conf->cqm_rssi_thold,
3666 bss_conf->cqm_rssi_hyst);
3669 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
3672 if (changed & BSS_CHANGED_BSSID)
3673 if (!is_zero_ether_addr(bss_conf->bssid)) {
3674 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3678 ret = wl1271_build_qos_null_data(wl, vif);
/* capture the AP's supported/HT rates under RCU */
3683 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
3685 sta = ieee80211_find_sta(vif, bss_conf->bssid);
3689 /* save the supp_rates of the ap */
3690 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
3691 if (sta->ht_cap.ht_supported)
3693 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
3694 (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
3695 sta_ht_cap = sta->ht_cap;
/* association / disassociation */
3702 if ((changed & BSS_CHANGED_ASSOC)) {
3703 if (bss_conf->assoc) {
3706 wlvif->aid = bss_conf->aid;
3707 wlvif->channel_type = bss_conf->channel_type;
3708 wlvif->beacon_int = bss_conf->beacon_int;
3712 /* Cancel connection_loss_work */
3713 cancel_delayed_work_sync(&wl->connection_loss_work);
3716 * use basic rates from AP, and determine lowest rate
3717 * to use with control frames.
3719 rates = bss_conf->basic_rates;
3720 wlvif->basic_rate_set =
3721 wl1271_tx_enabled_rates_get(wl, rates,
3724 wl1271_tx_min_rate_get(wl,
3725 wlvif->basic_rate_set);
3728 wl1271_tx_enabled_rates_get(wl,
3731 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3736 * with wl1271, we don't need to update the
3737 * beacon_int and dtim_period, because the firmware
3738 * updates it by itself when the first beacon is
3739 * received after a join.
3741 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
3746 * Get a template for hardware connection maintenance
3748 dev_kfree_skb(wlvif->probereq);
3749 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
3752 ieoffset = offsetof(struct ieee80211_mgmt,
3753 u.probe_req.variable);
3754 wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
3756 /* enable the connection monitoring feature */
3757 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
3761 /* use defaults when not associated */
3763 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
3766 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
3770 /* free probe-request template */
3771 dev_kfree_skb(wlvif->probereq);
3772 wlvif->probereq = NULL;
3774 /* revert back to minimum rates for the current band */
3775 wl1271_set_band_rate(wl, wlvif);
3777 wl1271_tx_min_rate_get(wl,
3778 wlvif->basic_rate_set);
3779 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3783 /* disable connection monitor features */
3784 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3786 /* Disable the keep-alive feature */
3787 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3791 /* restore the bssid filter and go to dummy bssid */
3794 * we might have to disable roc, if there was
3795 * no IF_OPER_UP notification.
3798 ret = wl12xx_croc(wl, wlvif->role_id);
3803 * (we also need to disable roc in case of
3804 * roaming on the same channel. until we will
3805 * have a better flow...)
3807 if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
3808 ret = wl12xx_croc(wl,
3809 wlvif->dev_role_id);
3814 wl1271_unjoin(wl, wlvif);
3815 if (!bss_conf->idle)
3816 wl12xx_start_dev(wl, wlvif);
/* IBSS rate setup */
3821 if (changed & BSS_CHANGED_IBSS) {
3822 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
3823 bss_conf->ibss_joined);
3825 if (bss_conf->ibss_joined) {
3826 u32 rates = bss_conf->basic_rates;
3827 wlvif->basic_rate_set =
3828 wl1271_tx_enabled_rates_get(wl, rates,
3831 wl1271_tx_min_rate_get(wl,
3832 wlvif->basic_rate_set);
3834 /* by default, use 11b + OFDM rates */
3835 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
3836 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3842 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
/* issue the join, then manage ROC around the EAPOL exchange */
3847 ret = wl1271_join(wl, wlvif, set_assoc);
3849 wl1271_warning("cmd join failed %d", ret);
3853 /* ROC until connected (after EAPOL exchange) */
3855 ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
3859 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
3860 wl12xx_set_authorized(wl, wlvif);
3863 * stop device role if started (we might already be in
3866 if (wl12xx_dev_role_started(wlvif)) {
3867 ret = wl12xx_stop_dev(wl, wlvif);
3873 /* Handle new association with HT. Do this after join. */
3875 if ((changed & BSS_CHANGED_HT) &&
3876 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3877 ret = wl1271_acx_set_ht_capabilities(wl,
3882 wl1271_warning("Set ht cap true failed %d",
3887 /* handle new association without HT and disassociation */
3888 else if (changed & BSS_CHANGED_ASSOC) {
3889 ret = wl1271_acx_set_ht_capabilities(wl,
3894 wl1271_warning("Set ht cap false failed %d",
3901 /* Handle HT information change. Done after join. */
3902 if ((changed & BSS_CHANGED_HT) &&
3903 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3904 ret = wl1271_acx_set_ht_information(wl, wlvif,
3905 bss_conf->ht_operation_mode);
3907 wl1271_warning("Set ht information failed %d", ret);
3912 /* Handle arp filtering. Done after join. */
3913 if ((changed & BSS_CHANGED_ARP_FILTER) ||
3914 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
3915 __be32 addr = bss_conf->arp_addr_list[0];
3916 wlvif->sta.qos = bss_conf->qos;
3917 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
3919 if (bss_conf->arp_addr_cnt == 1 &&
3920 bss_conf->arp_filter_enabled) {
3921 wlvif->ip_addr = addr;
3923 * The template should have been configured only upon
3924 * association. however, it seems that the correct ip
3925 * isn't being set (when sending), so we have to
3926 * reconfigure the template upon every ip change.
3928 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3930 wl1271_warning("build arp rsp failed: %d", ret);
3934 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
3935 (ACX_ARP_FILTER_ARP_FILTERING |
3936 ACX_ARP_FILTER_AUTO_ARP),
3940 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * wl1271_op_bss_info_changed - mac80211 .bss_info_changed handler.
 *
 * Takes the device mutex, wakes the chip, and dispatches to the AP- or
 * STA-specific handler based on the vif's BSS type.  Skips vifs that
 * are not yet initialized.
 */
3951 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
3952 struct ieee80211_vif *vif,
3953 struct ieee80211_bss_conf *bss_conf,
3956 struct wl1271 *wl = hw->priv;
3957 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3958 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3961 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
3964 mutex_lock(&wl->mutex);
3966 if (unlikely(wl->state == WL1271_STATE_OFF))
3969 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
3972 ret = wl1271_ps_elp_wakeup(wl);
3977 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
3979 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
3981 wl1271_ps_elp_sleep(wl);
3984 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_conf_tx - mac80211 .conf_tx handler (per-queue EDCA params).
 *
 * Configures the AC parameters (cw_min/cw_max/aifs/txop) and the TID
 * configuration for the mapped firmware queue.  The PS scheme is either
 * UPSD-trigger or legacy depending on the queue's UAPSD setting.
 */
3987 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
3988 struct ieee80211_vif *vif, u16 queue,
3989 const struct ieee80211_tx_queue_params *params)
3991 struct wl1271 *wl = hw->priv;
3992 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3996 mutex_lock(&wl->mutex);
3998 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4001 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4003 ps_scheme = CONF_PS_SCHEME_LEGACY;
4005 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4008 ret = wl1271_ps_elp_wakeup(wl);
4013 * the txop is confed in units of 32us by the mac80211,
/* hence the << 5 (x32) conversion below */
4016 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4017 params->cw_min, params->cw_max,
4018 params->aifs, params->txop << 5);
4022 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4023 CONF_CHANNEL_TYPE_EDCF,
4024 wl1271_tx_get_queue(queue),
4025 ps_scheme, CONF_ACK_POLICY_LEGACY,
4029 wl1271_ps_elp_sleep(wl);
4032 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_get_tsf - mac80211 .get_tsf handler.
 * Reads the firmware TSF via ACX; returns ULLONG_MAX if it cannot be
 * read (chip off or wakeup/ACX failure).
 */
4037 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4038 struct ieee80211_vif *vif)
4041 struct wl1271 *wl = hw->priv;
4042 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4043 u64 mactime = ULLONG_MAX;
4046 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4048 mutex_lock(&wl->mutex);
4050 if (unlikely(wl->state == WL1271_STATE_OFF))
4053 ret = wl1271_ps_elp_wakeup(wl);
4057 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4062 wl1271_ps_elp_sleep(wl);
4065 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_get_survey - mac80211 .get_survey handler.
 * Reports only the current channel with the driver's cached noise
 * level (wl->noise); only SURVEY_INFO_NOISE_DBM is filled.
 */
4069 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4070 struct survey_info *survey)
4072 struct wl1271 *wl = hw->priv;
4073 struct ieee80211_conf *conf = &hw->conf;
4078 survey->channel = conf->channel;
4079 survey->filled = SURVEY_INFO_NOISE_DBM;
4080 survey->noise = wl->noise;
/*
 * wl1271_allocate_sta - allocate an hlid/link for a new AP-mode station.
 *
 * Fails when AP_MAX_STATIONS is reached; otherwise allocates a link,
 * stores the hlid in the station's drv_priv, records the MAC address in
 * wl->links, and bumps the active station count.
 */
4085 static int wl1271_allocate_sta(struct wl1271 *wl,
4086 struct wl12xx_vif *wlvif,
4087 struct ieee80211_sta *sta)
4089 struct wl1271_station *wl_sta;
4093 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4094 wl1271_warning("could not allocate HLID - too much stations");
4098 wl_sta = (struct wl1271_station *)sta->drv_priv;
4099 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4101 wl1271_warning("could not allocate HLID - too many links");
4105 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4106 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4107 wl->active_sta_count++;
/*
 * wl1271_free_sta - release an AP-mode station's hlid and link state.
 *
 * Clears the hlid from the per-vif map and all per-link state (address,
 * BA bitmap, PS maps), frees the link, and decrements the active
 * station count.  When the last station goes away the TX watchdog is
 * rearmed so STA-buffered frames still in firmware don't trip it.
 */
4111 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4113 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4116 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4117 memset(wl->links[hlid].addr, 0, ETH_ALEN);
4118 wl->links[hlid].ba_bitmap = 0;
4119 __clear_bit(hlid, &wl->ap_ps_map);
4120 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4121 wl12xx_free_link(wl, wlvif, &hlid);
4122 wl->active_sta_count--;
4125 * rearm the tx watchdog when the last STA is freed - give the FW a
4126 * chance to return STA-buffered packets before complaining.
4128 if (wl->active_sta_count == 0)
4129 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * wl12xx_sta_add - add a peer station (AP mode).
 * Allocates an hlid via wl1271_allocate_sta(), then issues the firmware
 * add-peer command; the hlid is released again if the command fails.
 */
4132 static int wl12xx_sta_add(struct wl1271 *wl,
4133 struct wl12xx_vif *wlvif,
4134 struct ieee80211_sta *sta)
4136 struct wl1271_station *wl_sta;
4140 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4142 ret = wl1271_allocate_sta(wl, wlvif, sta);
4146 wl_sta = (struct wl1271_station *)sta->drv_priv;
4147 hlid = wl_sta->hlid;
4149 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4151 wl1271_free_sta(wl, wlvif, hlid);
/*
 * wl12xx_sta_remove - remove a peer station (AP mode).
 * Issues the firmware remove-peer command for the station's hlid, then
 * frees the hlid/link state; WARNs if the hlid is not in the map.
 */
4156 static int wl12xx_sta_remove(struct wl1271 *wl,
4157 struct wl12xx_vif *wlvif,
4158 struct ieee80211_sta *sta)
4160 struct wl1271_station *wl_sta;
4163 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4165 wl_sta = (struct wl1271_station *)sta->drv_priv;
4167 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4170 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4174 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * wl12xx_update_sta_state - react to mac80211 station state transitions.
 *
 * AP mode: NOTEXIST->NONE adds the peer, NONE->NOTEXIST removes it, and
 * the transition to AUTHORIZED sets the firmware peer state and HT caps.
 * STA mode: AUTHORIZED sets WLVIF_FLAG_STA_AUTHORIZED and completes the
 * association via wl12xx_set_authorized(); dropping back from
 * AUTHORIZED to ASSOC clears the flag.
 */
4178 static int wl12xx_update_sta_state(struct wl1271 *wl,
4179 struct wl12xx_vif *wlvif,
4180 struct ieee80211_sta *sta,
4181 enum ieee80211_sta_state old_state,
4182 enum ieee80211_sta_state new_state)
4184 struct wl1271_station *wl_sta;
4186 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4187 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4190 wl_sta = (struct wl1271_station *)sta->drv_priv;
4191 hlid = wl_sta->hlid;
4193 /* Add station (AP mode) */
4195 old_state == IEEE80211_STA_NOTEXIST &&
4196 new_state == IEEE80211_STA_NONE)
4197 return wl12xx_sta_add(wl, wlvif, sta);
4199 /* Remove station (AP mode) */
4201 old_state == IEEE80211_STA_NONE &&
4202 new_state == IEEE80211_STA_NOTEXIST) {
4204 wl12xx_sta_remove(wl, wlvif, sta);
4208 /* Authorize station (AP mode) */
4210 new_state == IEEE80211_STA_AUTHORIZED) {
4211 ret = wl12xx_cmd_set_peer_state(wl, hlid);
4215 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4220 /* Authorize station */
4222 new_state == IEEE80211_STA_AUTHORIZED) {
4223 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4224 return wl12xx_set_authorized(wl, wlvif);
/* De-authorize (STA mode): drop back to ASSOC */
4228 old_state == IEEE80211_STA_AUTHORIZED &&
4229 new_state == IEEE80211_STA_ASSOC) {
4230 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
/*
 * wl12xx_op_sta_state - mac80211 .sta_state handler.
 *
 * Locking/wakeup wrapper around wl12xx_update_sta_state().  For
 * downward transitions (new_state < old_state) the final return value
 * is adjusted after the mutex is dropped — mac80211 cannot be refused
 * a state rollback.
 */
4237 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4238 struct ieee80211_vif *vif,
4239 struct ieee80211_sta *sta,
4240 enum ieee80211_sta_state old_state,
4241 enum ieee80211_sta_state new_state)
4243 struct wl1271 *wl = hw->priv;
4244 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4247 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4248 sta->aid, old_state, new_state);
4250 mutex_lock(&wl->mutex);
4252 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4257 ret = wl1271_ps_elp_wakeup(wl);
4261 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4263 wl1271_ps_elp_sleep(wl);
4265 mutex_unlock(&wl->mutex);
4266 if (new_state < old_state)
/*
 * wl1271_op_ampdu_action - mac80211 .ampdu_action handler.
 *
 * Manages RX block-ack sessions in firmware: RX_START opens a receiver
 * BA session for the tid (bounded by RX_BA_MAX_SESSIONS and rejected if
 * already active), RX_STOP tears it down.  The per-link BA bitmap lives
 * in wlvif->sta for STA mode or wl->links[hlid] for AP mode.  TX AMPDU
 * session management is handled by the firmware on its own, so the TX
 * action cases are no-ops here.
 */
4271 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4272 struct ieee80211_vif *vif,
4273 enum ieee80211_ampdu_mlme_action action,
4274 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4277 struct wl1271 *wl = hw->priv;
4278 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4280 u8 hlid, *ba_bitmap;
4282 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4285 /* sanity check - the fields in FW are only 8bits wide */
4286 if (WARN_ON(tid > 0xFF))
4289 mutex_lock(&wl->mutex);
4291 if (unlikely(wl->state == WL1271_STATE_OFF)) {
/* pick the BA bitmap for this link: per-vif for STA, per-hlid for AP */
4296 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4297 hlid = wlvif->sta.hlid;
4298 ba_bitmap = &wlvif->sta.ba_rx_bitmap;
4299 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4300 struct wl1271_station *wl_sta;
4302 wl_sta = (struct wl1271_station *)sta->drv_priv;
4303 hlid = wl_sta->hlid;
4304 ba_bitmap = &wl->links[hlid].ba_bitmap;
4310 ret = wl1271_ps_elp_wakeup(wl);
4314 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4318 case IEEE80211_AMPDU_RX_START:
4319 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4324 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4326 wl1271_error("exceeded max RX BA sessions");
4330 if (*ba_bitmap & BIT(tid)) {
4332 wl1271_error("cannot enable RX BA session on active "
4337 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4340 *ba_bitmap |= BIT(tid);
4341 wl->ba_rx_session_count++;
4345 case IEEE80211_AMPDU_RX_STOP:
4346 if (!(*ba_bitmap & BIT(tid))) {
4348 wl1271_error("no active RX BA session on tid: %d",
4353 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4356 *ba_bitmap &= ~BIT(tid);
4357 wl->ba_rx_session_count--;
4362 * The BA initiator session management in FW independently.
4363 * Falling break here on purpose for all TX APDU commands.
4365 case IEEE80211_AMPDU_TX_START:
4366 case IEEE80211_AMPDU_TX_STOP:
4367 case IEEE80211_AMPDU_TX_OPERATIONAL:
4372 wl1271_error("Incorrect ampdu action id=%x\n", action);
4376 wl1271_ps_elp_sleep(wl);
4379 mutex_unlock(&wl->mutex);
/*
 * mac80211 ->set_bitrate_mask handler: record the per-band legacy rate
 * masks on the vif, and for an unassociated STA also push the new rate
 * policies to the FW immediately.
 * NOTE(review): excerpt is missing some lines (braces/labels).
 */
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   const struct cfg80211_bitrate_mask *mask)
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
		     mask->control[NL80211_BAND_2GHZ].legacy,
		     mask->control[NL80211_BAND_5GHZ].legacy);
	mutex_lock(&wl->mutex);
	/* cache the enabled-rate bitmap for every band on the vif */
	for (i = 0; i < IEEE80211_NUM_BANDS; i++)
		wlvif->bitrate_masks[i] =
			wl1271_tx_enabled_rates_get(wl,
						    mask->control[i].legacy,
	if (unlikely(wl->state == WL1271_STATE_OFF))
	/* only apply to FW right away when not yet associated (STA) */
	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
		ret = wl1271_ps_elp_wakeup(wl);
		wl1271_set_band_rate(wl, wlvif);
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
		wl1271_ps_elp_sleep(wl);
	mutex_unlock(&wl->mutex);
/*
 * mac80211 ->channel_switch handler: flush pending TX, then ask the FW
 * to perform the channel switch for every STA vif.  If the HW is off,
 * report the switch as failed to mac80211 instead.
 * NOTE(review): excerpt is missing some lines (braces/labels).
 */
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
				     struct ieee80211_channel_switch *ch_switch)
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
	/* drain queued frames before switching channels */
	wl1271_tx_flush(wl);
	mutex_lock(&wl->mutex);
	if (unlikely(wl->state == WL1271_STATE_OFF)) {
		/* HW is down: tell mac80211 the switch did not happen */
		wl12xx_for_each_wlvif_sta(wl, wlvif) {
			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
			ieee80211_chswitch_done(vif, false);
	ret = wl1271_ps_elp_wakeup(wl);
	/* TODO: change mac80211 to pass vif as param */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
			set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
	wl1271_ps_elp_sleep(wl);
	mutex_unlock(&wl->mutex);
/*
 * mac80211 ->tx_frames_pending handler: report whether any frames are
 * still queued in the driver or in flight in the FW.
 */
static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
	struct wl1271 *wl = hw->priv;
	mutex_lock(&wl->mutex);
	if (unlikely(wl->state == WL1271_STATE_OFF))
	/* packets are considered pending if in the TX queue or the FW */
	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
	mutex_unlock(&wl->mutex);
/* can't be const, mac80211 writes to this */
/*
 * 2.4 GHz legacy rate table (CCK 1-11 Mbps + OFDM 6-54 Mbps).
 * hw_value/hw_value_short map each rate to the FW rate-bit constants.
 * NOTE(review): the .bitrate fields are not visible in this excerpt.
 */
static struct ieee80211_rate wl1271_rates[] = {
	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* can't be const, mac80211 writes to this */
/* 2.4 GHz band channels 1-14 (channel 14 is Japan-only, 2484 MHz) */
static struct ieee80211_channel wl1271_channels[] = {
	{ .hw_value = 1, .center_freq = 2412, .max_power = 25 },
	{ .hw_value = 2, .center_freq = 2417, .max_power = 25 },
	{ .hw_value = 3, .center_freq = 2422, .max_power = 25 },
	{ .hw_value = 4, .center_freq = 2427, .max_power = 25 },
	{ .hw_value = 5, .center_freq = 2432, .max_power = 25 },
	{ .hw_value = 6, .center_freq = 2437, .max_power = 25 },
	{ .hw_value = 7, .center_freq = 2442, .max_power = 25 },
	{ .hw_value = 8, .center_freq = 2447, .max_power = 25 },
	{ .hw_value = 9, .center_freq = 2452, .max_power = 25 },
	{ .hw_value = 10, .center_freq = 2457, .max_power = 25 },
	{ .hw_value = 11, .center_freq = 2462, .max_power = 25 },
	{ .hw_value = 12, .center_freq = 2467, .max_power = 25 },
	{ .hw_value = 13, .center_freq = 2472, .max_power = 25 },
	{ .hw_value = 14, .center_freq = 2484, .max_power = 25 },
/* can't be const, mac80211 writes to this */
/* 2.4 GHz band descriptor handed to mac80211 (copied per-device later) */
static struct ieee80211_supported_band wl1271_band_2ghz = {
	.channels = wl1271_channels,
	.n_channels = ARRAY_SIZE(wl1271_channels),
	.bitrates = wl1271_rates,
	.n_bitrates = ARRAY_SIZE(wl1271_rates),
/* 5 GHz data rates for WL1273 */
/* OFDM-only (no CCK on 5 GHz); .bitrate fields not visible in excerpt */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 7, .center_freq = 5035, .max_power = 25 },
	{ .hw_value = 8, .center_freq = 5040, .max_power = 25 },
	{ .hw_value = 9, .center_freq = 5045, .max_power = 25 },
	{ .hw_value = 11, .center_freq = 5055, .max_power = 25 },
	{ .hw_value = 12, .center_freq = 5060, .max_power = 25 },
	{ .hw_value = 16, .center_freq = 5080, .max_power = 25 },
	{ .hw_value = 34, .center_freq = 5170, .max_power = 25 },
	{ .hw_value = 36, .center_freq = 5180, .max_power = 25 },
	{ .hw_value = 38, .center_freq = 5190, .max_power = 25 },
	{ .hw_value = 40, .center_freq = 5200, .max_power = 25 },
	{ .hw_value = 42, .center_freq = 5210, .max_power = 25 },
	{ .hw_value = 44, .center_freq = 5220, .max_power = 25 },
	{ .hw_value = 46, .center_freq = 5230, .max_power = 25 },
	{ .hw_value = 48, .center_freq = 5240, .max_power = 25 },
	{ .hw_value = 52, .center_freq = 5260, .max_power = 25 },
	{ .hw_value = 56, .center_freq = 5280, .max_power = 25 },
	{ .hw_value = 60, .center_freq = 5300, .max_power = 25 },
	{ .hw_value = 64, .center_freq = 5320, .max_power = 25 },
	{ .hw_value = 100, .center_freq = 5500, .max_power = 25 },
	{ .hw_value = 104, .center_freq = 5520, .max_power = 25 },
	{ .hw_value = 108, .center_freq = 5540, .max_power = 25 },
	{ .hw_value = 112, .center_freq = 5560, .max_power = 25 },
	{ .hw_value = 116, .center_freq = 5580, .max_power = 25 },
	{ .hw_value = 120, .center_freq = 5600, .max_power = 25 },
	{ .hw_value = 124, .center_freq = 5620, .max_power = 25 },
	{ .hw_value = 128, .center_freq = 5640, .max_power = 25 },
	{ .hw_value = 132, .center_freq = 5660, .max_power = 25 },
	{ .hw_value = 136, .center_freq = 5680, .max_power = 25 },
	{ .hw_value = 140, .center_freq = 5700, .max_power = 25 },
	{ .hw_value = 149, .center_freq = 5745, .max_power = 25 },
	{ .hw_value = 153, .center_freq = 5765, .max_power = 25 },
	{ .hw_value = 157, .center_freq = 5785, .max_power = 25 },
	{ .hw_value = 161, .center_freq = 5805, .max_power = 25 },
	{ .hw_value = 165, .center_freq = 5825, .max_power = 25 },
/* 5 GHz band descriptor handed to mac80211 (copied per-device later) */
static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/* mac80211 callback table for this driver */
static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wl1271_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.set_key = wl1271_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.channel_switch = wl12xx_op_channel_switch,
	/* expands to a .testmode_cmd entry only when CONFIG_NL80211_TESTMODE is set */
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Translate a HW/FW RX rate code into a mac80211 rate-table index for
 * the given band, logging and rejecting out-of-range or unsupported
 * codes.  NOTE(review): excerpt is missing some lines (braces/returns).
 */
u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
	/* bounds-check against the chip-specific rate table size */
	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
		wl1271_error("Illegal RX rate from HW: %d", rate);
	idx = wl->band_rate_to_idx[band][rate];
	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
		wl1271_error("Unsupported RX rate from HW: %d", rate);
/* sysfs "bt_coex_state" show: print current BT coexistence (SG) state */
static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
					       struct device_attribute *attr,
	struct wl1271 *wl = dev_get_drvdata(dev);
	/* lock around reading the state to get a consistent value */
	mutex_lock(&wl->mutex);
	len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
	mutex_unlock(&wl->mutex);
/*
 * sysfs "bt_coex_state" store: parse 0/1 and enable/disable BT
 * coexistence (soft gemini).  Applied to the FW only when the HW is on.
 * NOTE(review): excerpt is missing some lines (braces/labels).
 */
static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
	struct wl1271 *wl = dev_get_drvdata(dev);
	ret = kstrtoul(buf, 10, &res);
		wl1271_warning("incorrect value written to bt_coex_mode");
	mutex_lock(&wl->mutex);
	/* no-op if the requested state matches the current one */
	if (res == wl->sg_enabled)
	wl->sg_enabled = res;
	/* HW off: remember the setting, apply it on next start */
	if (wl->state == WL1271_STATE_OFF)
	ret = wl1271_ps_elp_wakeup(wl);
	wl1271_acx_sg_enable(wl, wl->sg_enabled);
	wl1271_ps_elp_sleep(wl);
	mutex_unlock(&wl->mutex);
/* user-readable, root-writable sysfs attribute for BT coex control */
static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
		   wl1271_sysfs_show_bt_coex_state,
		   wl1271_sysfs_store_bt_coex_state);
/* sysfs "hw_pg_ver" show: print the chip PG version, or "n/a" if unknown */
static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
					   struct device_attribute *attr,
	struct wl1271 *wl = dev_get_drvdata(dev);
	mutex_lock(&wl->mutex);
	/* negative hw_pg_ver means the version was never read from HW */
	if (wl->hw_pg_ver >= 0)
		len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
		len = snprintf(buf, len, "n/a\n");
	mutex_unlock(&wl->mutex);
/* read-only sysfs attribute exposing the chip PG version */
static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
		   wl1271_sysfs_show_hw_pg_ver, NULL);
/*
 * sysfs binary "fwlog" reader: block (interruptibly) until FW log data
 * is available, then copy it out and compact the remaining bytes to the
 * front of the buffer.  A negative fwlog_size signals the log is being
 * torn down (see wlcore_free_hw), which unblocks waiting readers.
 * NOTE(review): excerpt is missing some lines (braces/returns).
 */
static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
				       struct bin_attribute *bin_attr,
				       char *buffer, loff_t pos, size_t count)
	struct device *dev = container_of(kobj, struct device, kobj);
	struct wl1271 *wl = dev_get_drvdata(dev);
	ret = mutex_lock_interruptible(&wl->mutex);
		return -ERESTARTSYS;
	/* Let only one thread read the log at a time, blocking others */
	while (wl->fwlog_size == 0) {
		prepare_to_wait_exclusive(&wl->fwlog_waitq,
					  TASK_INTERRUPTIBLE);
		/* data may have arrived between the check and the wait setup */
		if (wl->fwlog_size != 0) {
			finish_wait(&wl->fwlog_waitq, &wait);
		/* drop the mutex while sleeping so the writer can make progress */
		mutex_unlock(&wl->mutex);
		finish_wait(&wl->fwlog_waitq, &wait);
		if (signal_pending(current))
			return -ERESTARTSYS;
		ret = mutex_lock_interruptible(&wl->mutex);
			return -ERESTARTSYS;
	/* Check if the fwlog is still valid */
	if (wl->fwlog_size < 0) {
		mutex_unlock(&wl->mutex);
	/* Seeking is not supported - old logs are not kept. Disregard pos. */
	len = min(count, (size_t)wl->fwlog_size);
	wl->fwlog_size -= len;
	memcpy(buffer, wl->fwlog, len);
	/* Make room for new messages */
	memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
	mutex_unlock(&wl->mutex);
/* binary sysfs file "fwlog" (root-read-only), backed by the reader above */
static struct bin_attribute fwlog_attr = {
	.attr = {.name = "fwlog", .mode = S_IRUSR},
	.read = wl1271_sysfs_read_fwlog,
/*
 * Delayed work: report connection loss to mac80211 for every associated
 * STA vif (scheduled when the FW stops seeing beacons).
 */
static void wl1271_connection_loss_work(struct work_struct *work)
	struct delayed_work *dwork;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	/* recover the wl1271 instance from the embedded delayed_work */
	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, connection_loss_work);
	wl1271_info("Connection loss work.");
	mutex_lock(&wl->mutex);
	if (unlikely(wl->state == WL1271_STATE_OFF))
	/* Call mac80211 connection loss */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		vif = wl12xx_wlvif_to_vif(wlvif);
		ieee80211_connection_loss(vif);
	mutex_unlock(&wl->mutex);
/*
 * Build n consecutive MAC addresses from a 24-bit OUI and a 24-bit NIC
 * base (address i uses nic + i; see the wrap-around warning below) and
 * publish them to the wiphy.
 */
static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
					u32 oui, u32 nic, int n)
	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d",
	/* the NIC part is only 24 bits wide; warn if incrementing overflows it */
	if (nic + n - 1 > 0xffffff)
		wl1271_warning("NIC part of the MAC address wraps around!");
	for (i = 0; i < n; i++) {
		/* bytes 0-2: OUI (big-endian), bytes 3-5: NIC (big-endian) */
		wl->addresses[i].addr[0] = (u8)(oui >> 16);
		wl->addresses[i].addr[1] = (u8)(oui >> 8);
		wl->addresses[i].addr[2] = (u8) oui;
		wl->addresses[i].addr[3] = (u8)(nic >> 16);
		wl->addresses[i].addr[4] = (u8)(nic >> 8);
		wl->addresses[i].addr[5] = (u8) nic;
	wl->hw->wiphy->n_addresses = n;
	wl->hw->wiphy->addresses = wl->addresses;
/*
 * Power the chip on briefly to read identification data: chip id,
 * PG version and (if the chip-family ops provide it) the fused MAC
 * address.  Powers the chip off again before returning.
 * NOTE(review): excerpt is missing some lines (braces/returns).
 */
static int wl12xx_get_hw_info(struct wl1271 *wl)
	ret = wl12xx_set_power_on(wl);
	wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B);
	/* default to no fused MAC; get_mac() below may fill these in */
	wl->fuse_oui_addr = 0;
	wl->fuse_nic_addr = 0;
	wl->hw_pg_ver = wl->ops->get_pg_ver(wl);
	if (wl->ops->get_mac)
		wl->ops->get_mac(wl);
	wl1271_power_off(wl);
/*
 * Register the device with mac80211: derive the WLAN MAC addresses from
 * the NVS (or from fuse data when the NVS address is zero), register the
 * hw, and set up debugfs.  Idempotent via mac80211_registered.
 * NOTE(review): excerpt is missing some lines (braces/labels).
 */
static int wl1271_register_hw(struct wl1271 *wl)
	u32 oui_addr = 0, nic_addr = 0;
	if (wl->mac80211_registered)
	ret = wl1271_fetch_nvs(wl);
		/* NOTE: The wl->nvs->nvs element must be first, in
		 * order to simplify the casting, we assume it is at
		 * the beginning of the wl->nvs structure.
		 */
		u8 *nvs_ptr = (u8 *)wl->nvs;
		/* MAC bytes are scattered in the NVS blob at fixed offsets */
			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
	/* if the MAC address is zeroed in the NVS derive from fuse */
	if (oui_addr == 0 && nic_addr == 0) {
		oui_addr = wl->fuse_oui_addr;
		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
		nic_addr = wl->fuse_nic_addr + 1;
	/* two addresses: one per supported concurrent interface */
	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2);
	ret = ieee80211_register_hw(wl->hw);
		wl1271_error("unable to register mac80211 hw: %d", ret);
	wl->mac80211_registered = true;
	wl1271_debugfs_init(wl);
	wl1271_notice("loaded");
/* Undo wl1271_register_hw(): stop PLT mode (if active) and unregister */
static void wl1271_unregister_hw(struct wl1271 *wl)
	wl1271_plt_stop(wl);
	ieee80211_unregister_hw(wl->hw);
	wl->mac80211_registered = false;
/*
 * Populate the ieee80211_hw / wiphy structures with this device's
 * capabilities: headroom, HW flags, cipher suites, interface modes,
 * scan limits, band tables (per-device copies) and wowlan/probe-response
 * offload support.  Called once before registering with mac80211.
 * NOTE(review): excerpt is missing some lines (braces/returns).
 */
static int wl1271_init_ieee80211(struct wl1271 *wl)
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		/* TI-proprietary GEM cipher */
		WL1271_CIPHER_SUITE_GEM,
	/* The tx descriptor buffer and the TKIP space. */
	wl->hw->extra_tx_headroom = WL1271_EXTRA_SPACE_TKIP +
		sizeof(struct wl1271_tx_hw_descr);
	/* FIXME: find a proper value */
	wl->hw->channel_change_time = 10000;
	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
		IEEE80211_HW_SUPPORTS_UAPSD |
		IEEE80211_HW_HAS_RATE_CONTROL |
		IEEE80211_HW_CONNECTION_MONITOR |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_AP_LINK_PS |
		IEEE80211_HW_AMPDU_AGGREGATION |
		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
		IEEE80211_HW_SCAN_WHILE_IDLE;
	wl->hw->wiphy->cipher_suites = cipher_suites;
	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
	wl->hw->wiphy->max_scan_ssids = 1;
	wl->hw->wiphy->max_sched_scan_ssids = 16;
	wl->hw->wiphy->max_match_sets = 16;
	/*
	 * Maximum length of elements in scanning probe request templates
	 * should be the maximum length possible for a template, without
	 * the IEEE80211 header of the template
	 */
	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
		sizeof(struct ieee80211_header);
	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
		sizeof(struct ieee80211_header);
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	/* make sure all our channels fit in the scanned_ch bitmask */
	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
		     ARRAY_SIZE(wl1271_channels_5ghz) >
		     WL1271_MAX_CHANNELS);
	/*
	 * We keep local copies of the band structs because we need to
	 * modify them on a per-device basis.
	 */
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
	       sizeof(wl1271_band_2ghz));
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, &wl->ht_cap,
	       sizeof(wl->ht_cap));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
	       sizeof(wl1271_band_5ghz));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, &wl->ht_cap,
	       sizeof(wl->ht_cap));
	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		&wl->bands[IEEE80211_BAND_2GHZ];
	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		&wl->bands[IEEE80211_BAND_5GHZ];
	/* rate control is done in FW; mac80211 only needs one rate slot */
	wl->hw->max_rates = 1;
	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
	SET_IEEE80211_DEV(wl->hw, wl->dev);
	/* per-station / per-vif private data sizes for mac80211 allocations */
	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
#define WL1271_DEFAULT_CHANNEL 0

/*
 * Allocate and initialize the ieee80211_hw and embedded wl1271 driver
 * state: queues, work items, freezable workqueue, locks, aggregation
 * buffer, dummy packet, FW log page and mailbox.  On error, unwinds all
 * allocations in reverse order and returns ERR_PTR(ret).
 * @priv_size: size of the chip-family private area hung off wl->priv.
 * NOTE(review): excerpt is missing some lines (braces/labels/returns).
 */
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
	struct ieee80211_hw *hw;
	BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
		wl1271_error("could not alloc ieee80211_hw");
	memset(wl, 0, sizeof(*wl));
	/* chip-family (wl12xx/wl18xx) private data */
	wl->priv = kzalloc(priv_size, GFP_KERNEL);
		wl1271_error("could not alloc wl priv");
		goto err_priv_alloc;
	INIT_LIST_HEAD(&wl->wlvif_list);
	/* one TX queue per AC for every possible link */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WL12XX_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);
	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);
	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
	INIT_DELAYED_WORK(&wl->connection_loss_work,
			  wl1271_connection_loss_work);
	/* freezable so work is suspended across system sleep transitions */
	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
	wl->channel = WL1271_DEFAULT_CHANNEL;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = IEEE80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	/* BT coexistence (soft gemini) defaults to enabled */
	wl->sg_enabled = true;
	wl->ap_fw_ps_map = 0;
	wl->platform_quirks = 0;
	wl->sched_scanning = false;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	init_waitqueue_head(&wl->fwlog_waitq);
	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;
	spin_lock_init(&wl->wl_lock);
	wl->state = WL1271_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	/* page-aligned buffer used to aggregate TX/RX transfers to the chip */
	order = get_order(WL1271_AGGR_BUFFER_SIZE);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
		goto err_dummy_packet;
	/* GFP_DMA: the mailbox is read via DMA-capable transfers */
	wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
	/* error unwind path: free resources in reverse allocation order */
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, order);
	destroy_workqueue(wl->freezable_wq);
	wl1271_debugfs_exit(wl);
	ieee80211_free_hw(hw);
	return ERR_PTR(ret);
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * Tear down everything wlcore_alloc_hw()/wlcore_probe() set up: unblock
 * fwlog readers (fwlog_size = -1 is the "log gone" sentinel they check),
 * remove sysfs files, free buffers and the workqueue, and release the
 * ieee80211_hw.  NOTE(review): excerpt is missing some lines.
 */
int wlcore_free_hw(struct wl1271 *wl)
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	wl->fwlog_size = -1;
	wake_up_interruptible_all(&wl->fwlog_waitq);
	mutex_unlock(&wl->mutex);
	device_remove_bin_file(wl->dev, &fwlog_attr);
	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
	device_remove_file(wl->dev, &dev_attr_bt_coex_state);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf,
		   get_order(WL1271_AGGR_BUFFER_SIZE));
	wl1271_debugfs_exit(wl);
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->fw_status_1);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);
	ieee80211_free_hw(wl->hw);
EXPORT_SYMBOL_GPL(wlcore_free_hw);
/*
 * Hard-IRQ half of the threaded interrupt: complete any pending ELP
 * wakeup, and if we are suspended, defer the work (mark it pending,
 * disable the IRQ and raise a PM wakeup event) instead of waking the
 * handler thread.  Otherwise returns IRQ_WAKE_THREAD to run wl1271_irq.
 */
static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
	struct wl1271 *wl = cookie;
	unsigned long flags;
	wl1271_debug(DEBUG_IRQ, "IRQ");
	/* complete the ELP completion */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	if (wl->elp_compl) {
		complete(wl->elp_compl);
		wl->elp_compl = NULL;
	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
		/* don't enqueue a work right now. mark it as pending */
		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
		disable_irq_nosync(wl->irq);
		pm_wakeup_event(wl->dev, 0);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
	return IRQ_WAKE_THREAD;
/*
 * Platform-device probe: wire up platform data (IRQ, power callback,
 * bus ops), request the threaded IRQ, configure wake-from-suspend and
 * wowlan, read chip info, initialize mac80211 capabilities, register
 * the hw and create the sysfs files.  Errors unwind in reverse order.
 * NOTE(review): excerpt is missing some lines (braces/labels/returns).
 */
int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
	struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
	unsigned long irqflags;
	/* the chip-family driver must have filled in its ops and partition table */
	if (!wl->ops || !wl->ptable) {
	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
	/* adjust some runtime configuration parameters */
	wlcore_adjust_conf(wl);
	wl->irq = platform_get_irq(pdev, 0);
	wl->platform_quirks = pdata->platform_quirks;
	wl->set_power = pdata->set_power;
	wl->dev = &pdev->dev;
	wl->if_ops = pdata->ops;
	platform_set_drvdata(pdev, wl);
	/* some platforms can only deliver edge-triggered interrupts */
	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		irqflags = IRQF_TRIGGER_RISING;
		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
	/* hard half wl12xx_hardirq + threaded half wl1271_irq */
	ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
		wl1271_error("request_irq() failed: %d", ret);
	ret = enable_irq_wake(wl->irq);
		wl->irq_wake_enabled = true;
		device_init_wakeup(wl->dev, 1);
		if (pdata->pwr_in_suspend) {
			/* chip stays powered in suspend: advertise wowlan patterns */
			wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
			wl->hw->wiphy->wowlan.n_patterns =
				WL1271_MAX_RX_FILTERS;
			wl->hw->wiphy->wowlan.pattern_min_len = 1;
			wl->hw->wiphy->wowlan.pattern_max_len =
				WL1271_RX_FILTER_MAX_PATTERN_SIZE;
	/* keep the IRQ masked until the device is actually started */
	disable_irq(wl->irq);
	ret = wl12xx_get_hw_info(wl);
		wl1271_error("couldn't get hw info");
	ret = wl->ops->identify_chip(wl);
	ret = wl1271_init_ieee80211(wl);
	ret = wl1271_register_hw(wl);
	/* Create sysfs file to control bt coex state */
	ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
		wl1271_error("failed to create sysfs file bt_coex_state");
	/* Create sysfs file to get HW PG version */
	ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
		wl1271_error("failed to create sysfs file hw_pg_ver");
		goto out_bt_coex_state;
	/* Create sysfs file for the FW log */
	ret = device_create_bin_file(wl->dev, &fwlog_attr);
		wl1271_error("failed to create sysfs file fwlog");
	/* error unwind: remove sysfs files and release the IRQ */
	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
	device_remove_file(wl->dev, &dev_attr_bt_coex_state);
	free_irq(wl->irq, wl);
EXPORT_SYMBOL_GPL(wlcore_probe);
/*
 * Platform-device remove: disable IRQ wake (if it was enabled during
 * probe), unregister from mac80211 and release the interrupt.
 */
int __devexit wlcore_remove(struct platform_device *pdev)
	struct wl1271 *wl = platform_get_drvdata(pdev);
	if (wl->irq_wake_enabled) {
		device_init_wakeup(wl->dev, 0);
		disable_irq_wake(wl->irq);
	wl1271_unregister_hw(wl);
	free_irq(wl->irq, wl);
EXPORT_SYMBOL_GPL(wlcore_remove);
/* runtime-tunable debug bitmask shared with the other wlcore modules */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");