1 // SPDX-License-Identifier: GPL-2.0-only
3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
20 #include "wl12xx_80211.h"
27 #include "vendor_cmd.h"
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_WAKEUP_TIMEOUT 500
/*
 * Module parameters. A value of -1 (or NULL for fwlog_param) means
 * "not set on the command line"; in that case the defaults from the
 * platform conf are kept — see wlcore_adjust_conf().
 */
35 static char *fwlog_param;
36 static int fwlog_mem_blocks = -1;
37 static int bug_on_recovery = -1;
38 static int no_recovery = -1;
/* Forward declarations for helpers defined later in this file. */
40 static void __wl1271_op_remove_interface(struct wl1271 *wl,
41 struct ieee80211_vif *vif,
42 bool reset_tx_queues);
43 static void wlcore_op_stop_locked(struct wl1271 *wl);
44 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Tell the firmware that the STA peer has reached the authorized state.
 * Only meaningful for STA-role vifs that are already associated; the
 * WLVIF_FLAG_STA_STATE_SENT bit guards against sending the command twice.
 */
46 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
50 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
53 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
56 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
59 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
63 	wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: cache the new DFS region and push the
 * updated regulatory domain configuration down to the firmware.
 */
67 static void wl1271_reg_notify(struct wiphy *wiphy,
68 struct regulatory_request *request)
70 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
71 	struct wl1271 *wl = hw->priv;
73 	/* copy the current dfs region */
75 	wl->dfs_region = request->dfs_region;
77 	wlcore_regdomain_config(wl);
/*
 * Enable/disable RX streaming in the FW via ACX and mirror the result
 * in the vif's WLVIF_FLAG_RX_STREAMING_STARTED flag.
 * Caller must hold wl->mutex.
 */
80 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
85 	/* we should hold wl->mutex */
86 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
91 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
93 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
99  * this function is being called when the rx_streaming interval
100  * has been changed or rx_streaming should be disabled
102 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
105 	int period = wl->conf.rx_streaming.interval;
107 	/* don't reconfigure if rx_streaming is disabled */
108 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
111 	/* reconfigure/disable according to new streaming_period */
113 	test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
114 	(wl->conf.rx_streaming.always ||
115 	test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
116 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
118 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
119 	/* don't cancel_work_sync since we might deadlock */
120 	del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: enable RX streaming for an associated STA vif and arm
 * the inactivity timer that will later schedule the disable work.
 * Skips silently if streaming is already on, the vif is not associated,
 * or neither "always" nor soft-gemini conditions apply.
 */
126 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
129 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
130 rx_streaming_enable_work);
131 	struct wl1271 *wl = wlvif->wl;
133 	mutex_lock(&wl->mutex);
135 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
136 	!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
137 	(!wl->conf.rx_streaming.always &&
138 	!test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
141 	if (!wl->conf.rx_streaming.interval)
144 	ret = pm_runtime_get_sync(wl->dev);
146 		pm_runtime_put_noidle(wl->dev);
150 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
154 	/* stop it after some time of inactivity */
155 	mod_timer(&wlvif->rx_streaming_timer,
156 	jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
159 	pm_runtime_mark_last_busy(wl->dev);
160 	pm_runtime_put_autosuspend(wl->dev);
162 	mutex_unlock(&wl->mutex);
/*
 * Deferred work: turn RX streaming off again (queued by the inactivity
 * timer). No-op if streaming was never started.
 */
165 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
168 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
169 rx_streaming_disable_work);
170 	struct wl1271 *wl = wlvif->wl;
172 	mutex_lock(&wl->mutex);
174 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
177 	ret = pm_runtime_get_sync(wl->dev);
179 		pm_runtime_put_noidle(wl->dev);
183 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
188 	pm_runtime_mark_last_busy(wl->dev);
189 	pm_runtime_put_autosuspend(wl->dev);
191 	mutex_unlock(&wl->mutex);
/*
 * RX-streaming inactivity timer: defer the actual disable to the work
 * queue (can't talk to the chip from timer/atomic context).
 */
194 static void wl1271_rx_streaming_timer(struct timer_list *t)
196 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
197 	struct wl1271 *wl = wlvif->wl;
198 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
201 /* wl->mutex must be taken */
/*
 * Re-arm the TX-stuck watchdog. Only armed while TX blocks are
 * allocated in the FW; otherwise there is nothing to watch.
 */
202 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
204 	/* if the watchdog is not armed, don't do anything */
205 	if (wl->tx_allocated_blocks == 0)
208 	cancel_delayed_work(&wl->tx_watchdog_work);
209 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
210 	msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Deferred work: push a rate-control/HT-capabilities update to the FW.
 * Mesh vifs update HT caps via ACX directly; other vif types go through
 * the chip-specific sta_rc_update op.
 */
213 static void wlcore_rc_update_work(struct work_struct *work)
216 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
218 	struct wl1271 *wl = wlvif->wl;
219 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
221 	mutex_lock(&wl->mutex);
223 	if (unlikely(wl->state != WLCORE_STATE_ON))
226 	ret = pm_runtime_get_sync(wl->dev);
228 		pm_runtime_put_noidle(wl->dev);
232 	if (ieee80211_vif_is_mesh(vif)) {
233 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
234 	true, wlvif->sta.hlid);
238 		wlcore_hw_sta_rc_update(wl, wlvif);
242 	pm_runtime_mark_last_busy(wl->dev);
243 	pm_runtime_put_autosuspend(wl->dev);
245 	mutex_unlock(&wl->mutex);
/*
 * TX watchdog: fires when no TX completion has been seen for
 * tx_watchdog_timeout ms while blocks are still allocated in the FW.
 * Legitimate stalls (ROC in progress, active scan, AP buffering frames
 * for sleeping stations) just re-arm the watchdog; otherwise TX is
 * considered stuck and FW recovery is triggered.
 */
248 static void wl12xx_tx_watchdog_work(struct work_struct *work)
250 	struct delayed_work *dwork;
253 	dwork = to_delayed_work(work);
254 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
256 	mutex_lock(&wl->mutex);
258 	if (unlikely(wl->state != WLCORE_STATE_ON))
261 	/* Tx went out in the meantime - everything is ok */
262 	if (unlikely(wl->tx_allocated_blocks == 0))
266 	 * if a ROC is in progress, we might not have any Tx for a long
267 	 * time (e.g. pending Tx on the non-ROC channels)
269 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
270 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
271 	wl->conf.tx.tx_watchdog_timeout);
272 		wl12xx_rearm_tx_watchdog_locked(wl);
277 	 * if a scan is in progress, we might not have any Tx for a long
280 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
281 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
282 	wl->conf.tx.tx_watchdog_timeout);
283 		wl12xx_rearm_tx_watchdog_locked(wl);
288 	 * AP might cache a frame for a long time for a sleeping station,
289 	 * so rearm the timer if there's an AP interface with stations. If
290 	 * Tx is genuinely stuck we will hopefully discover it when all
291 	 * stations are removed due to inactivity.
293 	if (wl->active_sta_count) {
294 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
296 	wl->conf.tx.tx_watchdog_timeout,
297 	wl->active_sta_count);
298 		wl12xx_rearm_tx_watchdog_locked(wl);
302 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
303 	wl->conf.tx.tx_watchdog_timeout);
304 	wl12xx_queue_recovery_work(wl);
307 	mutex_unlock(&wl->mutex);
/*
 * Override the platform conf with values supplied as module parameters
 * (fwlog mode/output, bug_on_recovery, no_recovery). Parameters left at
 * their "-1 / unset" defaults do not touch the conf.
 */
310 static void wlcore_adjust_conf(struct wl1271 *wl)
314 	if (!strcmp(fwlog_param, "continuous")) {
315 		wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
316 		wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
317 	} else if (!strcmp(fwlog_param, "dbgpins")) {
318 		wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
319 		wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
320 	} else if (!strcmp(fwlog_param, "disable")) {
321 		wl->conf.fwlog.mem_blocks = 0;
322 		wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
324 		wl1271_error("Unknown fwlog parameter %s", fwlog_param);
328 	if (bug_on_recovery != -1)
329 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
331 	if (no_recovery != -1)
332 		wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Per-link PS regulation for AP role: end high-level PS when the STA is
 * awake or has few queued packets in FW; start it when the STA sleeps
 * with many packets queued and other links could starve for FW memory.
 */
335 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
336 struct wl12xx_vif *wlvif,
341 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
344 	 * Wake up from high level PS if the STA is asleep with too little
345 	 * packets in FW or if the STA is awake.
347 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
348 		wl12xx_ps_link_end(wl, wlvif, hlid);
351 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
352 	 * Make an exception if this is the only connected link. In this
353 	 * case FW-memory congestion is less of a problem.
354 	 * Note that a single connected STA means 2*ap_count + 1 active links,
355 	 * since we must account for the global and broadcast AP links
356 	 * for each AP. The "fw_ps" check assures us the other link is a STA
357 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
359 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
360 	tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
361 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached FW per-link power-save bitmap from the FW status
 * and regulate PS on every station link of this AP vif.
 */
364 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
365 struct wl12xx_vif *wlvif,
366 struct wl_fw_status *status)
368 	unsigned long cur_fw_ps_map;
371 	cur_fw_ps_map = status->link_ps_bitmap;
372 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
373 		wl1271_debug(DEBUG_PSM,
374 	"link ps prev 0x%lx cur 0x%lx changed 0x%lx",
375 	wl->ap_fw_ps_map, cur_fw_ps_map,
376 	wl->ap_fw_ps_map ^ cur_fw_ps_map);
378 		wl->ap_fw_ps_map = cur_fw_ps_map;
381 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
382 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
383 	wl->links[hlid].allocated_pkts);
/*
 * Read and process the FW status block: update per-queue and per-link
 * freed-packet counters (all 8-bit wrap-safe), reconcile the total
 * released-blocks counter, re-arm or cancel the TX watchdog, recompute
 * available TX blocks, refresh AP link PS status and the host/chipset
 * time offset.
 */
386 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
388 	struct wl12xx_vif *wlvif;
389 	u32 old_tx_blk_count = wl->tx_blocks_available;
390 	int avail, freed_blocks;
393 	struct wl1271_link *lnk;
395 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
397 	wl->fw_status_len, false);
401 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
403 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
404 	"drv_rx_counter = %d, tx_results_counter = %d)",
406 	status->fw_rx_counter,
407 	status->drv_rx_counter,
408 	status->tx_results_counter);
410 	for (i = 0; i < NUM_TX_QUEUES; i++) {
411 	/* prevent wrap-around in freed-packets counter */
412 		wl->tx_allocated_pkts[i] -=
413 	(status->counters.tx_released_pkts[i] -
414 	wl->tx_pkts_freed[i]) & 0xff;
416 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
420 	for_each_set_bit(i, wl->links_map, wl->num_links) {
424 	/* prevent wrap-around in freed-packets counter */
425 		diff = (status->counters.tx_lnk_free_pkts[i] -
426 	lnk->prev_freed_pkts) & 0xff;
431 		lnk->allocated_pkts -= diff;
432 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
434 	/* accumulate the prev_freed_pkts counter */
435 		lnk->total_freed_pkts += diff;
438 	/* prevent wrap-around in total blocks counter */
439 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
440 		freed_blocks = status->total_released_blks -
443 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
444 	status->total_released_blks;
446 	wl->tx_blocks_freed = status->total_released_blks;
448 	wl->tx_allocated_blocks -= freed_blocks;
451 	 * If the FW freed some blocks:
452 	 * If we still have allocated blocks - re-arm the timer, Tx is
453 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
456 	if (wl->tx_allocated_blocks)
457 		wl12xx_rearm_tx_watchdog_locked(wl);
459 		cancel_delayed_work(&wl->tx_watchdog_work);
462 	avail = status->tx_total - wl->tx_allocated_blocks;
465 	 * The FW might change the total number of TX memblocks before
466 	 * we get a notification about blocks being released. Thus, the
467 	 * available blocks calculation might yield a temporary result
468 	 * which is lower than the actual available blocks. Keeping in
469 	 * mind that only blocks that were allocated can be moved from
470 	 * TX to RX, tx_blocks_available should never decrease here.
472 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
475 	/* if more blocks are available now, tx work can be scheduled */
476 	if (wl->tx_blocks_available > old_tx_blk_count)
477 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
479 	/* for AP update num of allocated TX blocks per link and ps status */
480 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
481 		wl12xx_irq_update_links_status(wl, wlvif, status);
484 	/* update the host-chipset time offset */
485 	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
486 	(s64)(status->fw_localtime);
488 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain both deferred skb queues into mac80211: RX frames to the
 * network stack and completed TX skbs back for status reporting.
 */
493 static void wl1271_flush_deferred_work(struct wl1271 *wl)
497 	/* Pass all received frames to the network stack */
498 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
499 		ieee80211_rx_ni(wl->hw, skb);
501 	/* Return sent skbs to the network stack */
502 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
503 		ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that flushes the deferred queues, looping until no more RX
 * frames were queued while we were flushing.
 */
506 static void wl1271_netstack_work(struct work_struct *work)
509 	container_of(work, struct wl1271, netstack_work);
512 		wl1271_flush_deferred_work(wl);
513 	} while (skb_queue_len(&wl->deferred_rx_queue));
516 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Threaded-IRQ core, called with wl->mutex held: loop reading the FW
 * status and dispatching the pending interrupt causes (watchdogs,
 * RX/TX data, event mailboxes A/B, init/HW-available notifications).
 * Bounded by WL1271_IRQ_MAX_LOOPS for level-triggered IRQs; a single
 * iteration for edge-triggered ones to avoid racing the hardirq.
 * Returns 0 or a negative error that triggers recovery in the caller.
 */
518 static int wlcore_irq_locked(struct wl1271 *wl)
522 	int loopcount = WL1271_IRQ_MAX_LOOPS;
523 	bool run_tx_queue = true;
525 	unsigned int defer_count;
529 	 * In case edge triggered interrupt must be used, we cannot iterate
530 	 * more than once without introducing race conditions with the hardirq.
532 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
535 	wl1271_debug(DEBUG_IRQ, "IRQ work");
537 	if (unlikely(wl->state != WLCORE_STATE_ON))
540 	ret = pm_runtime_get_sync(wl->dev);
542 		pm_runtime_put_noidle(wl->dev);
546 	while (!done && loopcount--) {
547 		smp_mb__after_atomic();
549 		ret = wlcore_fw_status(wl, wl->fw_status);
553 		wlcore_hw_tx_immediate_compl(wl);
555 		intr = wl->fw_status->intr;
556 		intr &= WLCORE_ALL_INTR_MASK;
562 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
563 			wl1271_error("HW watchdog interrupt received! starting recovery.");
564 			wl->watchdog_recovery = true;
567 	/* restarting the chip. ignore any other interrupt. */
571 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
572 			wl1271_error("SW watchdog interrupt received! "
573 	"starting recovery.");
574 			wl->watchdog_recovery = true;
577 	/* restarting the chip. ignore any other interrupt. */
581 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
582 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
584 			ret = wlcore_rx(wl, wl->fw_status);
588 	/* Check if any tx blocks were freed */
589 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
590 				if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
591 					if (!wl1271_tx_total_queue_count(wl))
592 						run_tx_queue = false;
593 					spin_unlock_irqrestore(&wl->wl_lock, flags);
597 	 * In order to avoid starvation of the TX path,
598 	 * call the work function directly.
601 					ret = wlcore_tx_work_locked(wl);
607 	/* check for tx results */
608 			ret = wlcore_hw_tx_delayed_compl(wl);
612 	/* Make sure the deferred queues don't get too long */
613 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
614 	skb_queue_len(&wl->deferred_rx_queue);
615 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
616 				wl1271_flush_deferred_work(wl);
619 		if (intr & WL1271_ACX_INTR_EVENT_A) {
620 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
621 			ret = wl1271_event_handle(wl, 0);
626 		if (intr & WL1271_ACX_INTR_EVENT_B) {
627 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
628 			ret = wl1271_event_handle(wl, 1);
633 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
634 			wl1271_debug(DEBUG_IRQ,
635 	"WL1271_ACX_INTR_INIT_COMPLETE");
637 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
638 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
642 	pm_runtime_mark_last_busy(wl->dev);
643 	pm_runtime_put_autosuspend(wl->dev);
/*
 * Threaded interrupt handler. Completes a pending ELP wakeup if one is
 * outstanding; while suspended only marks the work pending, disables
 * the IRQ and raises a wakeup event. Otherwise it cancels redundant TX
 * work, runs wlcore_irq_locked() under wl->mutex (queueing recovery on
 * failure) and re-queues TX work if frames remained unhandled.
 */
649 static irqreturn_t wlcore_irq(int irq, void *cookie)
653 	struct wl1271 *wl = cookie;
654 	bool queue_tx_work = true;
656 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
658 	/* complete the ELP completion */
659 	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
660 		spin_lock_irqsave(&wl->wl_lock, flags);
662 			complete(wl->elp_compl);
663 		spin_unlock_irqrestore(&wl->wl_lock, flags);
666 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
667 	/* don't enqueue a work right now. mark it as pending */
668 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
669 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
670 		spin_lock_irqsave(&wl->wl_lock, flags);
671 		disable_irq_nosync(wl->irq);
672 		pm_wakeup_event(wl->dev, 0);
673 		spin_unlock_irqrestore(&wl->wl_lock, flags);
677 	/* TX might be handled here, avoid redundant work */
678 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
679 	cancel_work_sync(&wl->tx_work);
681 	mutex_lock(&wl->mutex);
683 	ret = wlcore_irq_locked(wl);
685 		wl12xx_queue_recovery_work(wl);
687 	/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
688 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
689 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
690 		if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
691 			if (!wl1271_tx_total_queue_count(wl))
692 				queue_tx_work = false;
693 			spin_unlock_irqrestore(&wl->wl_lock, flags);
696 			ieee80211_queue_work(wl->hw, &wl->tx_work);
699 	mutex_unlock(&wl->mutex);
702 	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
/* Accumulator for the active-interface iteration in wl12xx_get_vif_count(). */
707 struct vif_counter_data {
710 	struct ieee80211_vif *cur_vif;
711 	bool cur_vif_running;
/*
 * ieee80211_iterate_active_interfaces() callback: count vifs and note
 * whether the vif of interest is among the running ones.
 */
714 static void wl12xx_vif_count_iter(void *data, u8 *mac,
715 struct ieee80211_vif *vif)
717 	struct vif_counter_data *counter = data;
720 	if (counter->cur_vif == vif)
721 		counter->cur_vif_running = true;
724 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Fill @data with the number of active interfaces and whether @cur_vif
 * is currently running, by iterating mac80211's active interfaces.
 */
725 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
726 struct ieee80211_vif *cur_vif,
727 struct vif_counter_data *data)
729 	memset(data, 0, sizeof(*data));
730 	data->cur_vif = cur_vif;
732 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
733 	wl12xx_vif_count_iter, data);
/*
 * Select and load the appropriate firmware image (PLT, multi-role or
 * single-role, chosen from the cached last_vif_count) into vmalloc'ed
 * memory. Skips the load if the right FW type is already cached.
 * The firmware blob size must be a multiple of 4 bytes.
 */
736 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
738 	const struct firmware *fw;
740 	enum wl12xx_fw_type fw_type;
744 		fw_type = WL12XX_FW_TYPE_PLT;
745 		fw_name = wl->plt_fw_name;
748 	 * we can't call wl12xx_get_vif_count() here because
749 	 * wl->mutex is taken, so use the cached last_vif_count value
751 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
752 			fw_type = WL12XX_FW_TYPE_MULTI;
753 			fw_name = wl->mr_fw_name;
755 			fw_type = WL12XX_FW_TYPE_NORMAL;
756 			fw_name = wl->sr_fw_name;
760 	if (wl->fw_type == fw_type)
763 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
765 	ret = request_firmware(&fw, fw_name, wl->dev);
768 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
773 		wl1271_error("firmware size is not multiple of 32 bits: %zu",
780 	wl->fw_type = WL12XX_FW_TYPE_NONE;
781 	wl->fw_len = fw->size;
782 	wl->fw = vmalloc(wl->fw_len);
785 		wl1271_error("could not allocate memory for the firmware");
790 	memcpy(wl->fw, fw->data, wl->fw_len);
792 	wl->fw_type = fw_type;
794 	release_firmware(fw);
/*
 * Schedule FW recovery. Guarded by the WLCORE_STATE_ON check so a
 * recovery already in flight (state RESTARTING) is not queued again.
 */
799 void wl12xx_queue_recovery_work(struct wl1271 *wl)
801 	/* Avoid a recursive recovery */
802 	if (wl->state == WLCORE_STATE_ON) {
803 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
806 		wl->state = WLCORE_STATE_RESTARTING;
807 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
808 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append up to @maxlen bytes of FW log data to the PAGE_SIZE-bounded
 * host buffer exposed via sysfs. Returns the number of bytes copied.
 */
812 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
816 	/* Make sure we have enough room */
817 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
819 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
820 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821 	wl->fwlog_size += len;
/*
 * After a FW panic, wake the chip, stop the FW logger (unless the FW is
 * hung or logging goes to debug pins) and drain the remaining log
 * memory blocks via wlcore_event_fw_logger() until end-of-log.
 */
826 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
831 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
834 	wl1271_info("Reading FW panic log");
837 	 * Make sure the chip is awake and the logger isn't active.
838 	 * Do not send a stop fwlog command if the fw is hanged or if
839 	 * dbgpins are used (due to some fw bug).
841 	error = pm_runtime_get_sync(wl->dev);
843 		pm_runtime_put_noidle(wl->dev);
846 	if (!wl->watchdog_recovery &&
847 	wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
848 		wl12xx_cmd_stop_fwlog(wl);
850 	/* Traverse the memory blocks linked list */
852 		end_of_log = wlcore_event_fw_logger(wl);
853 		if (end_of_log == 0) {
855 			end_of_log = wlcore_event_fw_logger(wl);
857 	} while (end_of_log != 0);
/*
 * Snapshot the link's total freed-packets counter into the station's
 * private data so the TX sequence number survives FW recovery. During
 * recovery a padding is added (larger for GEM encryption) to cover
 * packets transmitted but not yet reported in the FW status.
 */
860 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
861 u8 hlid, struct ieee80211_sta *sta)
863 	struct wl1271_station *wl_sta;
864 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
866 	wl_sta = (void *)sta->drv_priv;
867 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
870 	 * increment the initial seq number on recovery to account for
871 	 * transmitted packets that we haven't yet got in the FW status
873 	if (wlvif->encryption_type == KEY_GEM)
874 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
876 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
877 		wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Address-based wrapper for wlcore_save_freed_pkts(): look up the
 * station by MAC address and save its freed-packets counter. Warns and
 * bails on an invalid hlid or a zero address.
 */
880 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
881 struct wl12xx_vif *wlvif,
882 u8 hlid, const u8 *addr)
884 	struct ieee80211_sta *sta;
885 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
887 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
888 	is_zero_ether_addr(addr)))
892 	sta = ieee80211_find_sta(vif, addr);
894 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log recovery diagnostics: FW version, the FW program counter and the
 * raw interrupt status. Temporarily switches to the BOOT partition to
 * read the registers, then restores the WORK partition.
 */
898 static void wlcore_print_recovery(struct wl1271 *wl)
904 	wl1271_info("Hardware recovery in progress. FW ver: %s",
905 	wl->chip.fw_ver_str);
907 	/* change partitions momentarily so we can read the FW pc */
908 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
912 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
916 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
920 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
921 	pc, hint_sts, ++wl->recovery_count);
923 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * FW recovery work: dump panic log and diagnostics (for unintended
 * recoveries), honor the bug_on_recovery/no_recovery knobs, stop TX
 * queues, save per-station sequence counters, tear down every vif,
 * stop the chip and ask mac80211 to restart the hardware.
 */
927 static void wl1271_recovery_work(struct work_struct *work)
930 	container_of(work, struct wl1271, recovery_work);
931 	struct wl12xx_vif *wlvif;
932 	struct ieee80211_vif *vif;
935 	mutex_lock(&wl->mutex);
937 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
940 	error = pm_runtime_get_sync(wl->dev);
942 		wl1271_warning("Enable for recovery failed");
943 		pm_runtime_put_noidle(wl->dev);
945 	wlcore_disable_interrupts_nosync(wl);
947 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
948 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
949 			wl12xx_read_fwlog_panic(wl);
950 		wlcore_print_recovery(wl);
953 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
954 	!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
956 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
958 	if (wl->conf.recovery.no_recovery) {
959 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
963 	/* Prevent spurious TX during FW restart */
964 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
966 	/* reboot the chipset */
967 	while (!list_empty(&wl->wlvif_list)) {
968 		wlvif = list_first_entry(&wl->wlvif_list,
969 	struct wl12xx_vif, list);
970 		vif = wl12xx_wlvif_to_vif(wlvif);
972 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
973 	test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
974 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
975 	vif->bss_conf.bssid);
978 		__wl1271_op_remove_interface(wl, vif, false);
981 	wlcore_op_stop_locked(wl);
982 	pm_runtime_mark_last_busy(wl->dev);
983 	pm_runtime_put_autosuspend(wl->dev);
985 	ieee80211_restart_hw(wl->hw);
988 	 * It's safe to enable TX now - the queues are stopped after a request
991 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
994 	wl->watchdog_recovery = false;
995 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
996 	mutex_unlock(&wl->mutex);
/* Wake the chip out of ELP by writing the wake-up bit to the ELP control register. */
999 static int wlcore_fw_wakeup(struct wl1271 *wl)
1001 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the raw/parsed FW status buffers and the TX result
 * interface buffer. On failure everything allocated so far is freed
 * (kfree(NULL) is a no-op).
 */
1004 static int wl1271_setup(struct wl1271 *wl)
1006 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1007 	if (!wl->raw_fw_status)
1010 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1014 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1020 	kfree(wl->fw_status);
1021 	kfree(wl->raw_fw_status);
/*
 * Power-on sequence: settle delay, power on, post-power-on delay, I/O
 * reset, switch to the BOOT partition and wake the chip out of ELP.
 * Powers the chip back off on failure.
 */
1025 static int wl12xx_set_power_on(struct wl1271 *wl)
1029 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1030 	ret = wl1271_power_on(wl);
1033 	msleep(WL1271_POWER_ON_SLEEP);
1034 	wl1271_io_reset(wl);
1037 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1041 	/* ELP module wake up */
1042 	ret = wlcore_fw_wakeup(wl);
1050 	wl1271_power_off(wl);
/*
 * Full chip bring-up: power on, normalize the bus block size, allocate
 * status buffers and fetch the firmware (PLT or normal). On firmware
 * fetch failure the buffers from wl1271_setup() are released.
 */
1054 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1058 	ret = wl12xx_set_power_on(wl);
1063 	 * For wl127x based devices we could use the default block
1064 	 * size (512 bytes), but due to a bug in the sdio driver, we
1065 	 * need to set it explicitly after the chip is powered on. To
1066 	 * simplify the code and since the performance impact is
1067 	 * negligible, we use the same block size for all different
1070 	 * Check if the bus supports blocksize alignment and, if it
1071 	 * doesn't, make sure we don't have the quirk.
1073 	if (!wl1271_set_block_size(wl))
1074 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1076 	/* TODO: make sure the lower driver has set things up correctly */
1078 	ret = wl1271_setup(wl);
1082 	ret = wl12xx_fetch_firmware(wl, plt);
1084 		kfree(wl->fw_status);
1085 		kfree(wl->raw_fw_status);
1086 		kfree(wl->tx_res_if);
/*
 * Enter PLT (production line test) mode: boot the chip (with retries,
 * up to WL1271_BOOT_RETRIES), optionally run the chip's plt_init, and
 * publish the FW/HW version to the wiphy. Fails if the driver is not
 * currently in the OFF state.
 * NOTE(review): strncpy() here may leave fw_version unterminated if
 * fw_ver_str is ever as long as the destination — newer kernels use
 * strscpy(); confirm before relying on termination.
 */
1093 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1095 	int retries = WL1271_BOOT_RETRIES;
1096 	struct wiphy *wiphy = wl->hw->wiphy;
1098 	static const char* const PLT_MODE[] = {
1107 	mutex_lock(&wl->mutex);
1109 	wl1271_notice("power up");
1111 	if (wl->state != WLCORE_STATE_OFF) {
1112 		wl1271_error("cannot go into PLT state because not "
1113 	"in off state: %d", wl->state);
1118 	/* Indicate to lower levels that we are now in PLT mode */
1120 	wl->plt_mode = plt_mode;
1124 	ret = wl12xx_chip_wakeup(wl, true);
1128 	if (plt_mode != PLT_CHIP_AWAKE) {
1129 		ret = wl->ops->plt_init(wl);
1134 	wl->state = WLCORE_STATE_ON;
1135 	wl1271_notice("firmware booted in PLT mode %s (%s)",
1137 	wl->chip.fw_ver_str);
1139 	/* update hw/fw version info in wiphy struct */
1140 	wiphy->hw_version = wl->chip.id;
1141 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1142 	sizeof(wiphy->fw_version));
1147 	wl1271_power_off(wl);
1151 	wl->plt_mode = PLT_OFF;
1153 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1154 	WL1271_BOOT_RETRIES);
1156 	mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode: disable interrupts before changing state (so the IRQ
 * handler can't run half-way), flush deferred work and cancel pending
 * work items, then power the chip off and return to the OFF state.
 */
1161 int wl1271_plt_stop(struct wl1271 *wl)
1165 	wl1271_notice("power down");
1168 	 * Interrupts must be disabled before setting the state to OFF.
1169 	 * Otherwise, the interrupt handler might be called and exit without
1170 	 * reading the interrupt status.
1172 	wlcore_disable_interrupts(wl);
1173 	mutex_lock(&wl->mutex);
1175 		mutex_unlock(&wl->mutex);
1178 	 * This will not necessarily enable interrupts as interrupts
1179 	 * may have been disabled when op_stop was called. It will,
1180 	 * however, balance the above call to disable_interrupts().
1182 		wlcore_enable_interrupts(wl);
1184 		wl1271_error("cannot power down because not in PLT "
1185 	"state: %d", wl->state);
1190 	mutex_unlock(&wl->mutex);
1192 	wl1271_flush_deferred_work(wl);
1193 	cancel_work_sync(&wl->netstack_work);
1194 	cancel_work_sync(&wl->recovery_work);
1195 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1197 	mutex_lock(&wl->mutex);
1198 	wl1271_power_off(wl);
1200 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1201 	wl->state = WLCORE_STATE_OFF;
1203 	wl->plt_mode = PLT_OFF;
1205 	mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback. Maps the skb to a link (hlid) and AC queue,
 * drops frames with no vif / invalid link / hard-stopped queue, then
 * enqueues on the per-link queue under wl_lock. Applies the high
 * watermark to soft-stop the mac80211 queue, and kicks tx_work unless
 * the FW is busy or TX is already being handled in the IRQ path.
 */
1211 static void wl1271_op_tx(struct ieee80211_hw *hw,
1212 struct ieee80211_tx_control *control,
1213 struct sk_buff *skb)
1215 	struct wl1271 *wl = hw->priv;
1216 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1217 	struct ieee80211_vif *vif = info->control.vif;
1218 	struct wl12xx_vif *wlvif = NULL;
1219 	unsigned long flags;
1224 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1225 		ieee80211_free_txskb(hw, skb);
1229 	wlvif = wl12xx_vif_to_data(vif);
1230 	mapping = skb_get_queue_mapping(skb);
1231 	q = wl1271_tx_get_queue(mapping);
1233 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1235 	spin_lock_irqsave(&wl->wl_lock, flags);
1238 	 * drop the packet if the link is invalid or the queue is stopped
1239 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1240 	 * allow these packets through.
1242 	if (hlid == WL12XX_INVALID_LINK_ID ||
1243 	(!test_bit(hlid, wlvif->links_map)) ||
1244 	(wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1245 	!wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1246 	WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1247 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1248 		ieee80211_free_txskb(hw, skb);
1252 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1254 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1256 	wl->tx_queue_count[q]++;
1257 	wlvif->tx_queue_count[q]++;
1260 	 * The workqueue is slow to process the tx_queue and we need stop
1261 	 * the queue here, otherwise the queue will get too long.
1263 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1264 	!wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1265 	WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1266 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1267 		wlcore_stop_queue_locked(wl, wlvif, q,
1268 	WLCORE_QUEUE_STOP_REASON_WATERMARK);
1272 	 * The chip specific setup must run before the first TX packet -
1273 	 * before that, the tx_work will not be initialized!
1276 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1277 	!test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1278 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1281 	spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet requested by the FW (used when
 * the FW is low on RX memory blocks). Only one may be pending at a
 * time; if the FW TX path is free, TX work runs immediately, otherwise
 * the threaded IRQ handler will pick it up.
 */
1284 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1286 	unsigned long flags;
1289 	/* no need to queue a new dummy packet if one is already pending */
1290 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1293 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1295 	spin_lock_irqsave(&wl->wl_lock, flags);
1296 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1297 	wl->tx_queue_count[q]++;
1298 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1300 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1301 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1302 		return wlcore_tx_work_locked(wl);
1305 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1306 	 * interrupt handler function
1312 * The size of the dummy packet should be at least 1400 bytes. However, in
1313 * order to minimize the number of bus transactions, aligning it to 512 bytes
1314 * boundaries could be beneficial, performance wise
1316 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Allocate the zero-filled null-data dummy packet the FW requests when
 * low on RX memory. The skb reserves room for the TX HW descriptor,
 * carries a data/NULLFUNC/ToDS header, and is tagged with the
 * management TID as the FW requires for dummy packets.
 */
1318 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1320 	struct sk_buff *skb;
1321 	struct ieee80211_hdr_3addr *hdr;
1322 	unsigned int dummy_packet_size;
1324 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1325 	sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1327 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1329 		wl1271_warning("Failed to allocate a dummy packet skb");
1333 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1335 	hdr = skb_put_zero(skb, sizeof(*hdr));
1336 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1337 	IEEE80211_STYPE_NULLFUNC |
1338 	IEEE80211_FCTL_TODS);
1340 	skb_put_zero(skb, dummy_packet_size);
1342 	/* Dummy packets require the TID to be management */
1343 	skb->priority = WL1271_TID_MGMT;
1345 	/* Initialize all fields that might be used */
1346 	skb_set_queue_mapping(skb, 0);
1347 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate a cfg80211 WoWLAN packet pattern against FW RX-filter
 * limits: walks the pattern mask, counting the contiguous masked-byte
 * segments ("fields") and their encoded size; a segment crossing the
 * Ethernet/IP header boundary counts as a new field. Rejects patterns
 * with no mask, too many fields, or an oversized fields buffer.
 */
1354 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1356 	int num_fields = 0, in_field = 0, fields_size = 0;
1357 	int i, pattern_len = 0;
1360 		wl1271_warning("No mask in WoWLAN pattern");
1365 	 * The pattern is broken up into segments of bytes at different offsets
1366 	 * that need to be checked by the FW filter. Each segment is called
1367 	 * a field in the FW API. We verify that the total number of fields
1368 	 * required for this pattern won't exceed FW limits (8)
1369 	 * as well as the total fields buffer won't exceed the FW limit.
1370 	 * Note that if there's a pattern which crosses Ethernet/IP header
1371 	 * boundary a new field is required.
1373 	for (i = 0; i < p->pattern_len; i++) {
1374 		if (test_bit(i, (unsigned long *)p->mask)) {
1379 			if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1381 				fields_size += pattern_len +
1382 	RX_FILTER_FIELD_OVERHEAD;
1390 			fields_size += pattern_len +
1391 	RX_FILTER_FIELD_OVERHEAD;
1398 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1402 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1403 		wl1271_warning("RX Filter too complex. Too many segments");
1407 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1408 		wl1271_warning("RX filter pattern is too big");
/*
 * Allocate a zeroed RX filter; the caller releases it with
 * wl1271_rx_filter_free().
 */
1415 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1417 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/*
 * Free an RX filter: release each field's kmemdup'ed pattern buffer.
 * NOTE(review): the final kfree() of the filter itself is elided from
 * this listing — confirm against the full source.
 */
1420 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1427 for (i = 0; i < filter->num_fields; i++)
1428 kfree(filter->fields[i].pattern);
/*
 * Append one field (offset/flags/pattern) to an RX filter.
 * The pattern bytes are duplicated, so the caller keeps ownership of
 * its own buffer.  Fails when the FW per-filter field limit is hit or
 * allocation fails (error-return lines elided in this listing).
 */
1433 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1434 u16 offset, u8 flags,
1435 const u8 *pattern, u8 len)
1437 struct wl12xx_rx_filter_field *field;
1439 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1440 wl1271_warning("Max fields per RX filter. can't alloc another");
1444 field = &filter->fields[filter->num_fields];
/* own a private copy of the pattern bytes */
1446 field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1447 if (!field->pattern) {
1448 wl1271_warning("Failed to allocate RX filter pattern");
1452 filter->num_fields++;
/* offset is stored little-endian, matching the FW structure layout */
1454 field->offset = cpu_to_le16(offset);
1455 field->flags = flags;
/*
 * Total byte size of the filter's fields once flattened for the FW:
 * per field, the fixed struct minus the pattern pointer plus the
 * actual pattern length (the trailing "- sizeof(u8 *)" term and the
 * return are elided in this listing).
 */
1461 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1463 int i, fields_size = 0;
1465 for (i = 0; i < filter->num_fields; i++)
1466 fields_size += filter->fields[i].len +
1467 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into a flat FW buffer: each field is
 * written with its pattern bytes inlined where the pattern pointer
 * lives in the in-memory struct (hence the "- sizeof(u8 *)" stride).
 * Caller must size buf with wl1271_rx_filter_get_fields_size().
 */
1473 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1477 struct wl12xx_rx_filter_field *field;
1479 for (i = 0; i < filter->num_fields; i++) {
1480 field = (struct wl12xx_rx_filter_field *)buf;
1482 field->offset = filter->fields[i].offset;
1483 field->flags = filter->fields[i].flags;
1484 field->len = filter->fields[i].len;
/* copy pattern bytes over the pointer slot, packing the record */
1486 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1487 buf += sizeof(struct wl12xx_rx_filter_field) -
1488 sizeof(u8 *) + field->len;
1493 * Allocates an RX filter returned through f
1494 * which needs to be freed using rx_filter_free()
/*
 * Convert one cfg80211 WoWLAN pattern into an FW RX filter by walking
 * masked byte runs and emitting one filter field per contiguous run,
 * split at the Ethernet/IP header boundary.  NOTE(review): loop
 * increments, error returns and the *f assignment are elided in this
 * listing; comments only added here.
 */
1497 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1498 struct wl12xx_rx_filter **f)
1501 struct wl12xx_rx_filter *filter;
1505 filter = wl1271_rx_filter_alloc();
1507 wl1271_warning("Failed to alloc rx filter");
1513 while (i < p->pattern_len) {
/* skip unmasked bytes — they start no field */
1514 if (!test_bit(i, (unsigned long *)p->mask)) {
/* extend the run while bytes stay masked and within one header */
1519 for (j = i; j < p->pattern_len; j++) {
1520 if (!test_bit(j, (unsigned long *)p->mask))
1523 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1524 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
/* field offset is relative to the header it falls in */
1528 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1530 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1532 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1533 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1538 ret = wl1271_rx_filter_alloc_field(filter,
1541 &p->pattern[i], len);
/* a match on this filter should wake the host */
1548 filter->action = FILTER_SIGNAL;
/* error path: release the partially built filter */
1554 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filters for WoWLAN.  With no usable patterns the
 * default filter action is set to FILTER_SIGNAL and all filters are
 * cleared; otherwise every pattern is validated, translated and
 * enabled, and the default action becomes FILTER_DROP so only
 * pattern matches wake the host.  NOTE(review): early returns and
 * goto targets are elided in this listing.
 */
1560 static int wl1271_configure_wowlan(struct wl1271 *wl,
1561 struct cfg80211_wowlan *wow)
/* no WoWLAN config, "any" trigger, or no patterns: wake on everything */
1565 if (!wow || wow->any || !wow->n_patterns) {
1566 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1571 ret = wl1271_rx_filter_clear_all(wl);
1578 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1581 /* Validate all incoming patterns before clearing current FW state */
1582 for (i = 0; i < wow->n_patterns; i++) {
1583 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1585 wl1271_warning("Bad wowlan pattern %d", i);
/* reset FW filter state before installing the new set */
1590 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1594 ret = wl1271_rx_filter_clear_all(wl);
1598 /* Translate WoWLAN patterns into filters */
1599 for (i = 0; i < wow->n_patterns; i++) {
1600 struct cfg80211_pkt_pattern *p;
1601 struct wl12xx_rx_filter *filter = NULL;
1603 p = &wow->patterns[i];
1605 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1607 wl1271_warning("Failed to create an RX filter from "
1608 "wowlan pattern %d", i);
1612 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* filter contents were copied to FW; free the local copy */
1614 wl1271_rx_filter_free(filter);
/* anything not matching an installed pattern is dropped while asleep */
1619 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * STA-role suspend configuration: install WoWLAN filters and, when
 * the suspend wake-up conditions differ from the runtime ones, push
 * the suspend-specific wake-up event / listen interval to the FW.
 */
1625 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1626 struct wl12xx_vif *wlvif,
1627 struct cfg80211_wowlan *wow)
/* nothing to configure on an unassociated STA vif */
1631 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1634 ret = wl1271_configure_wowlan(wl, wow);
/* skip the ACX when suspend conditions equal the active ones */
1638 if ((wl->conf.conn.suspend_wake_up_event ==
1639 wl->conf.conn.wake_up_event) &&
1640 (wl->conf.conn.suspend_listen_interval ==
1641 wl->conf.conn.listen_interval))
1644 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1645 wl->conf.conn.suspend_wake_up_event,
1646 wl->conf.conn.suspend_listen_interval);
1649 wl1271_error("suspend: set wake up conditions failed: %d", ret);
/*
 * AP-role suspend configuration: enable beacon filtering and install
 * the WoWLAN RX filters.  No-op when the AP is not started.
 */
1655 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1656 struct wl12xx_vif *wlvif,
1657 struct cfg80211_wowlan *wow)
1661 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
/* don't wake the host for every beacon while suspended */
1664 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1668 ret = wl1271_configure_wowlan(wl, wow);
/*
 * Dispatch per-vif suspend configuration by BSS type; other vif
 * types need no suspend handling here.
 */
1677 static int wl1271_configure_suspend(struct wl1271 *wl,
1678 struct wl12xx_vif *wlvif,
1679 struct cfg80211_wowlan *wow)
1681 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1682 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1683 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1684 return wl1271_configure_suspend_ap(wl, wlvif, wow);
/*
 * Undo the suspend-time configuration for one vif: remove WoWLAN
 * filters, restore the runtime wake-up conditions (STA) and disable
 * beacon filtering (AP).  Best-effort — errors are logged, not
 * propagated.
 */
1688 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1691 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1692 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1694 if ((!is_ap) && (!is_sta))
/* only touch vifs that were actually configured at suspend time */
1697 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1698 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
/* NULL wow tears the RX filters back down */
1701 wl1271_configure_wowlan(wl, NULL);
/* nothing to restore when suspend conditions equal the active ones */
1704 if ((wl->conf.conn.suspend_wake_up_event ==
1705 wl->conf.conn.wake_up_event) &&
1706 (wl->conf.conn.suspend_listen_interval ==
1707 wl->conf.conn.listen_interval))
1710 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1711 wl->conf.conn.wake_up_event,
1712 wl->conf.conn.listen_interval);
1715 wl1271_error("resume: wake up conditions failed: %d",
/* AP: stop filtering beacons again */
1719 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/*
 * mac80211 suspend callback: flush TX, wake the device via runtime
 * PM, configure per-vif WoWLAN/suspend state, quiesce FW
 * notifications, cancel pending work and finally force a runtime
 * suspend.  NOTE(review): error returns and goto labels are elided
 * in this listing; comments only added here.
 */
1723 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1724 struct cfg80211_wowlan *wow)
1726 struct wl1271 *wl = hw->priv;
1727 struct wl12xx_vif *wlvif;
1728 unsigned long flags;
1731 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1734 /* we want to perform the recovery before suspending */
1735 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1736 wl1271_warning("postponing suspend to perform recovery");
1740 wl1271_tx_flush(wl);
1742 mutex_lock(&wl->mutex);
/* keep the device awake while configuring suspend state */
1744 ret = pm_runtime_get_sync(wl->dev);
1746 pm_runtime_put_noidle(wl->dev);
1747 mutex_unlock(&wl->mutex);
1751 wl->wow_enabled = true;
1752 wl12xx_for_each_wlvif(wl, wlvif) {
/* p2p-mgmt vifs carry no suspend configuration */
1753 if (wlcore_is_p2p_mgmt(wlvif))
1756 ret = wl1271_configure_suspend(wl, wlvif, wow);
1762 /* disable fast link flow control notifications from FW */
1763 ret = wlcore_hw_interrupt_notify(wl, false);
1767 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1768 ret = wlcore_hw_rx_ba_filter(wl,
1769 !!wl->conf.conn.suspend_rx_ba_activity);
1774 pm_runtime_put_noidle(wl->dev);
1775 mutex_unlock(&wl->mutex);
1778 wl1271_warning("couldn't prepare device to suspend");
1782 /* flush any remaining work */
1783 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1785 flush_work(&wl->tx_work);
1788 * Cancel the watchdog even if above tx_flush failed. We will detect
1789 * it on resume anyway.
1791 cancel_delayed_work(&wl->tx_watchdog_work);
1794 * set suspended flag to avoid triggering a new threaded_irq
/* irq_work must not be queued once we are suspended */
1797 spin_lock_irqsave(&wl->wl_lock, flags);
1798 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1799 spin_unlock_irqrestore(&wl->wl_lock, flags);
1801 return pm_runtime_force_suspend(wl->dev);
/*
 * mac80211 resume callback: force-resume the device, re-enable
 * irq_work, run any IRQ work that was postponed while suspended
 * (unless a recovery is pending), undo per-vif suspend config and
 * restore FW notifications.  NOTE(review): several goto labels and
 * error paths are elided in this listing.
 */
1804 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1806 struct wl1271 *wl = hw->priv;
1807 struct wl12xx_vif *wlvif;
1808 unsigned long flags;
1809 bool run_irq_work = false, pending_recovery;
1812 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1814 WARN_ON(!wl->wow_enabled);
1816 ret = pm_runtime_force_resume(wl->dev);
1818 wl1271_error("ELP wakeup failure!");
1823 * re-enable irq_work enqueuing, and call irq_work directly if
1824 * there is a pending work.
1826 spin_lock_irqsave(&wl->wl_lock, flags);
1827 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1828 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1829 run_irq_work = true;
1830 spin_unlock_irqrestore(&wl->wl_lock, flags);
1832 mutex_lock(&wl->mutex);
1834 /* test the recovery flag before calling any SDIO functions */
1835 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1839 wl1271_debug(DEBUG_MAC80211,
1840 "run postponed irq_work directly");
1842 /* don't talk to the HW if recovery is pending */
1843 if (!pending_recovery) {
1844 ret = wlcore_irq_locked(wl);
1846 wl12xx_queue_recovery_work(wl);
1849 wlcore_enable_interrupts(wl);
1852 if (pending_recovery) {
1853 wl1271_warning("queuing forgotten recovery on resume");
1854 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/* keep the device awake while restoring per-vif state */
1858 ret = pm_runtime_get_sync(wl->dev);
1860 pm_runtime_put_noidle(wl->dev);
1864 wl12xx_for_each_wlvif(wl, wlvif) {
1865 if (wlcore_is_p2p_mgmt(wlvif))
1868 wl1271_configure_resume(wl, wlvif);
/* re-enable fast link flow control notifications */
1871 ret = wlcore_hw_interrupt_notify(wl, true);
1875 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1876 ret = wlcore_hw_rx_ba_filter(wl, false);
1881 pm_runtime_mark_last_busy(wl->dev);
1882 pm_runtime_put_autosuspend(wl->dev);
1885 wl->wow_enabled = false;
1888 * Set a flag to re-init the watchdog on the first Tx after resume.
1889 * That way we avoid possible conditions where Tx-complete interrupts
1890 * fail to arrive and we perform a spurious recovery.
1892 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1893 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback.  Intentionally does no hardware work —
 * boot is deferred to add_interface because the MAC address must be
 * known before the firmware is downloaded (see comment below).
 */
1898 static int wl1271_op_start(struct ieee80211_hw *hw)
1900 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1903 * We have to delay the booting of the hardware because
1904 * we need to know the local MAC address before downloading and
1905 * initializing the firmware. The MAC address cannot be changed
1906 * after boot, and without the proper MAC address, the firmware
1907 * will not function properly.
1909 * The MAC address is first known when the corresponding interface
1910 * is added. That is where we will initialize the hardware.
/*
 * Stop the core with wl->mutex held: disable interrupts, drain all
 * deferred/async work (temporarily dropping the mutex to avoid
 * deadlock), power off and reset the driver's per-device state.
 * Also saves the Reg-Domain channel config for post-recovery
 * re-calibration.  NOTE(review): some lines (returns, closing
 * braces) are elided in this listing.
 */
1916 static void wlcore_op_stop_locked(struct wl1271 *wl)
/* already off: just rebalance a recovery-time interrupt disable */
1920 if (wl->state == WLCORE_STATE_OFF) {
1921 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1923 wlcore_enable_interrupts(wl);
1929 * this must be before the cancel_work calls below, so that the work
1930 * functions don't perform further work.
1932 wl->state = WLCORE_STATE_OFF;
1935 * Use the nosync variant to disable interrupts, so the mutex could be
1936 * held while doing so without deadlocking.
1938 wlcore_disable_interrupts_nosync(wl);
/* drop the mutex while synchronizing/cancelling to avoid deadlock */
1940 mutex_unlock(&wl->mutex);
1942 wlcore_synchronize_interrupts(wl);
1943 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1944 cancel_work_sync(&wl->recovery_work);
1945 wl1271_flush_deferred_work(wl);
1946 cancel_delayed_work_sync(&wl->scan_complete_work);
1947 cancel_work_sync(&wl->netstack_work);
1948 cancel_work_sync(&wl->tx_work);
1949 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1951 /* let's notify MAC80211 about the remaining pending TX frames */
1952 mutex_lock(&wl->mutex);
1953 wl12xx_tx_reset(wl);
1955 wl1271_power_off(wl);
1957 * In case a recovery was scheduled, interrupts were disabled to avoid
1958 * an interrupt storm. Now that the power is down, it is safe to
1959 * re-enable interrupts to balance the disable depth
1961 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1962 wlcore_enable_interrupts(wl);
/* reset runtime state to post-probe defaults */
1964 wl->band = NL80211_BAND_2GHZ;
1967 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1968 wl->channel_type = NL80211_CHAN_NO_HT;
1969 wl->tx_blocks_available = 0;
1970 wl->tx_allocated_blocks = 0;
1971 wl->tx_results_count = 0;
1972 wl->tx_packets_count = 0;
1973 wl->time_offset = 0;
1974 wl->ap_fw_ps_map = 0;
1976 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1977 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1978 memset(wl->links_map, 0, sizeof(wl->links_map));
1979 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1980 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1981 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1982 wl->active_sta_count = 0;
1983 wl->active_link_count = 0;
1985 /* The system link is always allocated */
1986 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1987 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1988 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1991 * this is performed after the cancel_work calls and the associated
1992 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1993 * get executed before all these vars have been reset.
1997 wl->tx_blocks_freed = 0;
1999 for (i = 0; i < NUM_TX_QUEUES; i++) {
2000 wl->tx_pkts_freed[i] = 0;
2001 wl->tx_allocated_pkts[i] = 0;
2004 wl1271_debugfs_reset(wl);
/* free FW status / TX result buffers; re-allocated on next boot */
2006 kfree(wl->raw_fw_status);
2007 wl->raw_fw_status = NULL;
2008 kfree(wl->fw_status);
2009 wl->fw_status = NULL;
2010 kfree(wl->tx_res_if);
2011 wl->tx_res_if = NULL;
2012 kfree(wl->target_mem_map);
2013 wl->target_mem_map = NULL;
2016 * FW channels must be re-calibrated after recovery,
2017 * save current Reg-Domain channel configuration and clear it.
2019 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2020 sizeof(wl->reg_ch_conf_pending));
2021 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/*
 * mac80211 stop callback: thin wrapper taking wl->mutex around the
 * locked stop implementation.
 */
2024 static void wlcore_op_stop(struct ieee80211_hw *hw)
2026 struct wl1271 *wl = hw->priv;
2028 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2030 mutex_lock(&wl->mutex);
2032 wlcore_op_stop_locked(wl);
2034 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when a channel switch timed out: report failure
 * to mac80211 and tell the FW to abort the switch.  NOTE(review):
 * the wl back-pointer assignment and goto labels are elided in this
 * listing.
 */
2037 static void wlcore_channel_switch_work(struct work_struct *work)
2039 struct delayed_work *dwork;
2041 struct ieee80211_vif *vif;
2042 struct wl12xx_vif *wlvif;
2045 dwork = to_delayed_work(work);
2046 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2049 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2051 mutex_lock(&wl->mutex);
2053 if (unlikely(wl->state != WLCORE_STATE_ON))
2056 /* check the channel switch is still ongoing */
2057 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2060 vif = wl12xx_wlvif_to_vif(wlvif);
/* false == channel switch did not complete successfully */
2061 ieee80211_chswitch_done(vif, false);
2063 ret = pm_runtime_get_sync(wl->dev);
2065 pm_runtime_put_noidle(wl->dev);
2069 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2071 pm_runtime_mark_last_busy(wl->dev);
2072 pm_runtime_put_autosuspend(wl->dev);
2074 mutex_unlock(&wl->mutex);
/*
 * Delayed work that reports a lost connection to mac80211, but only
 * if the core is still ON and the STA is still marked associated.
 */
2077 static void wlcore_connection_loss_work(struct work_struct *work)
2079 struct delayed_work *dwork;
2081 struct ieee80211_vif *vif;
2082 struct wl12xx_vif *wlvif;
2084 dwork = to_delayed_work(work);
2085 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2088 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2090 mutex_lock(&wl->mutex);
2092 if (unlikely(wl->state != WLCORE_STATE_ON))
2095 /* Call mac80211 connection loss */
2096 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2099 vif = wl12xx_wlvif_to_vif(wlvif);
2100 ieee80211_connection_loss(vif);
2102 mutex_unlock(&wl->mutex);
/*
 * Delayed work closing the ROC window opened for a pending AP-side
 * auth exchange, once the timeout really elapsed since the last auth
 * reply (a fresh reply may have re-armed it while we waited on the
 * mutex).
 */
2105 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2107 struct delayed_work *dwork;
2109 struct wl12xx_vif *wlvif;
2110 unsigned long time_spare;
2113 dwork = to_delayed_work(work);
2114 wlvif = container_of(dwork, struct wl12xx_vif,
2115 pending_auth_complete_work);
2118 mutex_lock(&wl->mutex);
2120 if (unlikely(wl->state != WLCORE_STATE_ON))
2124 * Make sure a second really passed since the last auth reply. Maybe
2125 * a second auth reply arrived while we were stuck on the mutex.
2126 * Check for a little less than the timeout to protect from scheduler
2129 time_spare = jiffies +
2130 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2131 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2134 ret = pm_runtime_get_sync(wl->dev);
2136 pm_runtime_put_noidle(wl->dev);
2140 /* cancel the ROC if active */
2141 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2143 pm_runtime_mark_last_busy(wl->dev);
2144 pm_runtime_put_autosuspend(wl->dev);
2146 mutex_unlock(&wl->mutex);
/*
 * Claim the first free slot in the rate-policies bitmap and return
 * its index through *idx (assignment to *idx is elided in this
 * listing).  Fails when all policies are in use.
 */
2149 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2151 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2152 WL12XX_MAX_RATE_POLICIES);
2153 if (policy >= WL12XX_MAX_RATE_POLICIES)
2156 __set_bit(policy, wl->rate_policies_map);
2161 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2163 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2166 __clear_bit(*idx, wl->rate_policies_map);
2167 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Claim the first free keep-alive (KLV) template slot; mirrors
 * wl12xx_allocate_rate_policy (the *idx assignment is elided in this
 * listing).
 */
2170 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2172 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2173 WLCORE_MAX_KLV_TEMPLATES);
2174 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2177 __set_bit(policy, wl->klv_templates_map);
/*
 * Release a keep-alive template slot and poison *idx with the
 * out-of-range sentinel, like wl12xx_free_rate_policy.
 */
2182 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2184 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2187 __clear_bit(*idx, wl->klv_templates_map);
2188 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map a vif's BSS type (plus its p2p/mesh attributes) to the FW role
 * identifier; returns WL12XX_INVALID_ROLE_TYPE for unknown types.
 * NOTE(review): p2p condition lines appear elided in this listing.
 */
2191 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2193 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2195 switch (wlvif->bss_type) {
2196 case BSS_TYPE_AP_BSS:
2198 return WL1271_ROLE_P2P_GO;
2199 else if (ieee80211_vif_is_mesh(vif))
2200 return WL1271_ROLE_MESH_POINT;
2202 return WL1271_ROLE_AP;
2204 case BSS_TYPE_STA_BSS:
2206 return WL1271_ROLE_P2P_CL;
2208 return WL1271_ROLE_STA;
2211 return WL1271_ROLE_IBSS;
2214 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2216 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize per-vif driver state when an interface is added: derive
 * the BSS type from the mac80211 vif type, allocate rate policies
 * and (for STA) a keep-alive template, seed rates/band/channel from
 * the global state, and set up all per-vif work items and timers.
 * NOTE(review): break statements between switch cases are elided in
 * this listing.
 */
2219 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2221 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2224 /* clear everything but the persistent data */
2225 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2227 switch (ieee80211_vif_type_p2p(vif)) {
2228 case NL80211_IFTYPE_P2P_CLIENT:
2231 case NL80211_IFTYPE_STATION:
2232 case NL80211_IFTYPE_P2P_DEVICE:
2233 wlvif->bss_type = BSS_TYPE_STA_BSS;
2235 case NL80211_IFTYPE_ADHOC:
2236 wlvif->bss_type = BSS_TYPE_IBSS;
2238 case NL80211_IFTYPE_P2P_GO:
2241 case NL80211_IFTYPE_AP:
2242 case NL80211_IFTYPE_MESH_POINT:
2243 wlvif->bss_type = BSS_TYPE_AP_BSS;
2246 wlvif->bss_type = MAX_BSS_TYPE;
/* no FW roles/links assigned yet */
2250 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2251 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2252 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2254 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2255 wlvif->bss_type == BSS_TYPE_IBSS) {
2256 /* init sta/ibss data */
2257 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2258 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2259 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2260 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2261 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2262 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2263 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2264 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP/mesh path: broadcast + per-AC unicast rate policies */
2267 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2268 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2269 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2270 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2271 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2272 wl12xx_allocate_rate_policy(wl,
2273 &wlvif->ap.ucast_rate_idx[i]);
2274 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2276 * TODO: check if basic_rate shouldn't be
2277 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2278 * instead (the same thing for STA above).
2280 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2281 /* TODO: this seems to be used only for STA, check it */
2282 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2285 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2286 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2287 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2290 * mac80211 configures some values globally, while we treat them
2291 * per-interface. thus, on init, we have to copy them from wl
2293 wlvif->band = wl->band;
2294 wlvif->channel = wl->channel;
2295 wlvif->power_level = wl->power_level;
2296 wlvif->channel_type = wl->channel_type;
/* per-vif deferred work; torn down in __wl1271_op_remove_interface */
2298 INIT_WORK(&wlvif->rx_streaming_enable_work,
2299 wl1271_rx_streaming_enable_work);
2300 INIT_WORK(&wlvif->rx_streaming_disable_work,
2301 wl1271_rx_streaming_disable_work);
2302 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2303 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2304 wlcore_channel_switch_work);
2305 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2306 wlcore_connection_loss_work);
2307 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2308 wlcore_pending_auth_complete_work);
2309 INIT_LIST_HEAD(&wlvif->list);
2311 timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
/*
 * Boot the chip and firmware, retrying up to WL1271_BOOT_RETRIES
 * times; on success publish hw/fw version info to wiphy, disable 11a
 * channels when unsupported, and move the core to WLCORE_STATE_ON.
 * NOTE(review): the retry loop header and several error/goto lines
 * are elided in this listing.
 */
2315 static int wl12xx_init_fw(struct wl1271 *wl)
2317 int retries = WL1271_BOOT_RETRIES;
2318 bool booted = false;
2319 struct wiphy *wiphy = wl->hw->wiphy;
2324 ret = wl12xx_chip_wakeup(wl, false);
2328 ret = wl->ops->boot(wl);
2332 ret = wl1271_hw_init(wl);
/* failed attempt: quiesce IRQs/work before powering off and retrying */
2340 mutex_unlock(&wl->mutex);
2341 /* Unlocking the mutex in the middle of handling is
2342 inherently unsafe. In this case we deem it safe to do,
2343 because we need to let any possibly pending IRQ out of
2344 the system (and while we are WLCORE_STATE_OFF the IRQ
2345 work function will not do anything.) Also, any other
2346 possible concurrent operations will fail due to the
2347 current state, hence the wl1271 struct should be safe. */
2348 wlcore_disable_interrupts(wl);
2349 wl1271_flush_deferred_work(wl);
2350 cancel_work_sync(&wl->netstack_work);
2351 mutex_lock(&wl->mutex);
2353 wl1271_power_off(wl);
2357 wl1271_error("firmware boot failed despite %d retries",
2358 WL1271_BOOT_RETRIES);
2362 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2364 /* update hw/fw version info in wiphy struct */
2365 wiphy->hw_version = wl->chip.id;
2366 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2367 sizeof(wiphy->fw_version));
2370 * Now we know if 11a is supported (info from the NVS), so disable
2371 * 11a channels if not supported
2373 if (!wl->enable_11a)
2374 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2376 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2377 wl->enable_11a ? "" : "not ");
2379 wl->state = WLCORE_STATE_ON;
/* True when the vif's device role has a link allocated (role started). */
2384 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2386 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2390 * Check whether a fw switch (i.e. moving from one loaded
2391 * fw to another) is needed. This function is also responsible
2392 * for updating wl->last_vif_count, so it must be called before
2393 * loading a non-plt fw (so the correct fw (single-role/multi-role)
/*
 * Returns true when the single-role/multi-role FW choice no longer
 * matches the (projected) vif count.  NOTE(review): the true/false
 * return lines are elided in this listing.
 */
2396 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2397 struct vif_counter_data vif_counter_data,
2400 enum wl12xx_fw_type current_fw = wl->fw_type;
2401 u8 vif_count = vif_counter_data.counter;
/* don't stack FW switches while one is already in progress */
2403 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2406 /* increase the vif count if this is a new vif */
2407 if (add && !vif_counter_data.cur_vif_running)
2410 wl->last_vif_count = vif_count;
2412 /* no need for fw change if the device is OFF */
2413 if (wl->state == WLCORE_STATE_OFF)
2416 /* no need for fw change if a single fw is used */
2417 if (!wl->mr_fw_name)
2420 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2422 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2429 * Enter "forced psm". Make sure the sta is in psm against the ap,
2430 * to make the fw switch a bit more disconnection-persistent.
/* Applied to every STA vif just before an intended FW recovery. */
2432 static void wl12xx_force_active_psm(struct wl1271 *wl)
2434 struct wl12xx_vif *wlvif;
2436 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2437 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * Accumulator for wlcore_hw_queue_iter: bitmap of hw-queue bases in
 * use, the vif being (re)added, and whether that vif is already
 * running in mac80211.
 */
2441 struct wlcore_hw_queue_iter_data {
2442 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2444 struct ieee80211_vif *vif;
2445 /* is the current vif among those iterated */
/*
 * Active-interface iterator callback: mark each vif's hw-queue base
 * as taken, and detect whether the vif we intend to add is already
 * running (then its base is kept, not re-marked).
 */
2449 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2450 struct ieee80211_vif *vif)
2452 struct wlcore_hw_queue_iter_data *iter_data = data;
/* P2P device vifs have no hw queues to track */
2454 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2455 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2458 if (iter_data->cur_running || vif == iter_data->vif) {
2459 iter_data->cur_running = true;
/* each interface owns a contiguous run of NUM_TX_QUEUES queues */
2463 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Assign a mac80211 hw-queue base for a new vif: reuse the existing
 * base if the vif is already running (resume/recovery), otherwise
 * take the first free slot; AP vifs also get a CAB (content-after-
 * beacon) queue from the reserved tail range.  NOTE(review): some
 * returns/labels are elided in this listing.
 */
2466 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2467 struct wl12xx_vif *wlvif)
2469 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2470 struct wlcore_hw_queue_iter_data iter_data = {};
2473 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2474 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2478 iter_data.vif = vif;
2480 /* mark all bits taken by active interfaces */
2481 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2482 IEEE80211_IFACE_ITER_RESUME_ALL,
2483 wlcore_hw_queue_iter, &iter_data);
2485 /* the current vif is already running in mac80211 (resume/recovery) */
2486 if (iter_data.cur_running) {
2487 wlvif->hw_queue_base = vif->hw_queue[0];
2488 wl1271_debug(DEBUG_MAC80211,
2489 "using pre-allocated hw queue base %d",
2490 wlvif->hw_queue_base);
2492 /* interface type might have changed type */
2493 goto adjust_cab_queue;
2496 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2497 WLCORE_NUM_MAC_ADDRESSES);
2498 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2501 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2502 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2503 wlvif->hw_queue_base);
2505 for (i = 0; i < NUM_TX_QUEUES; i++) {
2506 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2507 /* register hw queues in mac80211 */
2508 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2512 /* the last places are reserved for cab queues per interface */
2513 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2514 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2515 wlvif->hw_queue_base / NUM_TX_QUEUES;
2517 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface callback: initialize per-vif state, boot
 * the FW on first use (the MAC address is only known here — see
 * wl1271_op_start), trigger a single↔multi-role FW switch when
 * needed, and enable the matching FW role(s).  NOTE(review): error
 * returns and goto labels are elided in this listing.
 */
2522 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2523 struct ieee80211_vif *vif)
2525 struct wl1271 *wl = hw->priv;
2526 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2527 struct vif_counter_data vif_count;
2532 wl1271_error("Adding Interface not allowed while in PLT mode");
2536 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2537 IEEE80211_VIF_SUPPORTS_UAPSD |
2538 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2540 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2541 ieee80211_vif_type_p2p(vif), vif->addr);
2543 wl12xx_get_vif_count(hw, vif, &vif_count);
2545 mutex_lock(&wl->mutex);
2548 * in some very corner case HW recovery scenarios its possible to
2549 * get here before __wl1271_op_remove_interface is complete, so
2550 * opt out if that is the case.
2552 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2553 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2559 ret = wl12xx_init_vif_data(wl, vif);
2564 role_type = wl12xx_get_role_type(wl, wlvif);
2565 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2570 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2575 * TODO: after the nvs issue will be solved, move this block
2576 * to start(), and make sure here the driver is ON.
2578 if (wl->state == WLCORE_STATE_OFF) {
2580 * we still need this in order to configure the fw
2581 * while uploading the nvs
2583 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2585 ret = wl12xx_init_fw(wl);
2591 * Call runtime PM only after possible wl12xx_init_fw() above
2592 * is done. Otherwise we do not have interrupts enabled.
2594 ret = pm_runtime_get_sync(wl->dev);
2596 pm_runtime_put_noidle(wl->dev);
/* switch single↔multi-role FW via an intended recovery */
2600 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2601 wl12xx_force_active_psm(wl);
2602 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2603 mutex_unlock(&wl->mutex);
2604 wl1271_recovery_work(&wl->recovery_work);
2608 if (!wlcore_is_p2p_mgmt(wlvif)) {
2609 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2610 role_type, &wlvif->role_id);
2614 ret = wl1271_init_vif_specific(wl, vif);
/* p2p-mgmt path: device role instead of a full role */
2619 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2620 &wlvif->dev_role_id);
2624 /* needed mainly for configuring rate policies */
2625 ret = wl1271_sta_hw_init(wl, wlvif);
2630 list_add(&wlvif->list, &wl->wlvif_list);
2631 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2633 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2638 pm_runtime_mark_last_busy(wl->dev);
2639 pm_runtime_put_autosuspend(wl->dev);
2641 mutex_unlock(&wl->mutex);
2646 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2647 struct ieee80211_vif *vif,
2648 bool reset_tx_queues)
2650 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2652 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2654 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2656 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2659 /* because of hardware recovery, we may get here twice */
2660 if (wl->state == WLCORE_STATE_OFF)
2663 wl1271_info("down");
2665 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2666 wl->scan_wlvif == wlvif) {
2667 struct cfg80211_scan_info info = {
2672 * Rearm the tx watchdog just before idling scan. This
2673 * prevents just-finished scans from triggering the watchdog
2675 wl12xx_rearm_tx_watchdog_locked(wl);
2677 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2678 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2679 wl->scan_wlvif = NULL;
2680 wl->scan.req = NULL;
2681 ieee80211_scan_completed(wl->hw, &info);
2684 if (wl->sched_vif == wlvif)
2685 wl->sched_vif = NULL;
2687 if (wl->roc_vif == vif) {
2689 ieee80211_remain_on_channel_expired(wl->hw);
2692 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2693 /* disable active roles */
2694 ret = pm_runtime_get_sync(wl->dev);
2696 pm_runtime_put_noidle(wl->dev);
2700 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2701 wlvif->bss_type == BSS_TYPE_IBSS) {
2702 if (wl12xx_dev_role_started(wlvif))
2703 wl12xx_stop_dev(wl, wlvif);
2706 if (!wlcore_is_p2p_mgmt(wlvif)) {
2707 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2709 pm_runtime_put_noidle(wl->dev);
2713 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2715 pm_runtime_put_noidle(wl->dev);
2720 pm_runtime_mark_last_busy(wl->dev);
2721 pm_runtime_put_autosuspend(wl->dev);
2724 wl12xx_tx_reset_wlvif(wl, wlvif);
2726 /* clear all hlids (except system_hlid) */
2727 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2729 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2730 wlvif->bss_type == BSS_TYPE_IBSS) {
2731 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2732 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2733 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2734 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2735 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2737 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2738 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2739 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2740 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2741 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2742 wl12xx_free_rate_policy(wl,
2743 &wlvif->ap.ucast_rate_idx[i]);
2744 wl1271_free_ap_keys(wl, wlvif);
2747 dev_kfree_skb(wlvif->probereq);
2748 wlvif->probereq = NULL;
2749 if (wl->last_wlvif == wlvif)
2750 wl->last_wlvif = NULL;
2751 list_del(&wlvif->list);
2752 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2753 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2754 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2762 * Last AP, have more stations. Configure sleep auth according to STA.
2763 * Don't do this on unintended recovery.
2765 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2766 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2769 if (wl->ap_count == 0 && is_ap) {
2770 /* mask ap events */
2771 wl->event_mask &= ~wl->ap_event_mask;
2772 wl1271_event_unmask(wl);
2775 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2776 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2777 /* Configure for power according to debugfs */
2778 if (sta_auth != WL1271_PSM_ILLEGAL)
2779 wl1271_acx_sleep_auth(wl, sta_auth);
2780 /* Configure for ELP power saving */
2782 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2786 mutex_unlock(&wl->mutex);
2788 del_timer_sync(&wlvif->rx_streaming_timer);
2789 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2790 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2791 cancel_work_sync(&wlvif->rc_update_work);
2792 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2793 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2794 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2796 mutex_lock(&wl->mutex);
/*
 * mac80211 .remove_interface handler: tear down @vif under wl->mutex and,
 * if the remaining vif combination needs a different firmware, queue an
 * intended FW recovery.
 * NOTE(review): this extract elides several original lines (braces/early
 * exits); comments below describe only the visible statements.
 */
2799 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2800 struct ieee80211_vif *vif)
2802 struct wl1271 *wl = hw->priv;
2803 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2804 struct wl12xx_vif *iter;
2805 struct vif_counter_data vif_count;
2807 wl12xx_get_vif_count(hw, vif, &vif_count);
2808 mutex_lock(&wl->mutex);
/* nothing to do if the chip is off or this vif never finished init */
2810 if (wl->state == WLCORE_STATE_OFF ||
2811 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2815 * wl->vif can be null here if someone shuts down the interface
2816 * just when hardware recovery has been started.
2818 wl12xx_for_each_wlvif(wl, iter) {
2822 __wl1271_op_remove_interface(wl, vif, true);
/* the removed vif must have been found in the wlvif list */
2825 WARN_ON(iter != wlvif);
2826 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2827 wl12xx_force_active_psm(wl);
/* mark recovery as deliberate so recovery-time checks are skipped */
2828 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2829 wl12xx_queue_recovery_work(wl);
2832 mutex_unlock(&wl->mutex);
/*
 * mac80211 .change_interface handler: implemented as a remove + re-add of
 * the vif with the new type. The VIF_CHANGE_IN_PROGRESS flag brackets the
 * sequence so other paths can tell this is not a real teardown.
 */
2835 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2836 struct ieee80211_vif *vif,
2837 enum nl80211_iftype new_type, bool p2p)
2839 struct wl1271 *wl = hw->priv;
2842 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2843 wl1271_op_remove_interface(hw, vif);
2845 vif->type = new_type;
2847 ret = wl1271_op_add_interface(hw, vif);
2849 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue a role_start (JOIN) for a STA or IBSS vif. Chooses the IBSS or STA
 * start command based on the vif's bss_type and resets the recorded
 * encryption type, since JOIN clears the keys in the chipset.
 */
2853 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2856 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2859 * One of the side effects of the JOIN command is that it clears
2860 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2861 * to a WPA/WPA2 access point will therefore kill the data-path.
2862 * Currently the only valid scenario for JOIN during association
2863 * is on roaming, in which case we will also be given new keys.
2864 * Keep the below message for now, unless it starts bothering
2865 * users who really like to roam a lot :)
2867 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2868 wl1271_info("JOIN while associated.");
2870 /* clear encryption type */
2871 wlvif->encryption_type = KEY_NONE;
2874 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2876 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from @skb (starting at @offset into the frame) and
 * cache it in wlvif->ssid / wlvif->ssid_len. Fails if no SSID IE is found
 * or if it is longer than IEEE80211_MAX_SSID_LEN.
 */
2881 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2885 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2889 wl1271_error("No SSID in IEs!");
2894 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2895 wl1271_error("SSID is too long!");
2899 wlvif->ssid_len = ssid_len;
/* ptr+2 skips the IE header (id + length bytes) */
2900 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Populate the vif's cached SSID from the AP probe request template
 * provided by mac80211. Only supported for STA vifs.
 */
2904 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2906 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2907 struct sk_buff *skb;
2910 /* we currently only support setting the ssid from the ap probe req */
2911 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2914 skb = ieee80211_ap_probereq_get(wl->hw, vif);
/* IEs start after the fixed probe-request header */
2918 ieoffset = offsetof(struct ieee80211_mgmt,
2919 u.probe_req.variable);
2920 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Configure the firmware for a newly-established STA association: cache
 * the association parameters, build the PS-poll/probe-req/keep-alive
 * templates, enable connection monitoring and set the rate policies.
 * NOTE(review): error-handling lines are elided in this extract; the
 * command ordering below is documented as significant (see 2969-2970).
 */
2926 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2927 struct ieee80211_bss_conf *bss_conf,
/* cache association parameters from mac80211's bss_conf */
2933 wlvif->aid = bss_conf->aid;
2934 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2935 wlvif->beacon_int = bss_conf->beacon_int;
2936 wlvif->wmm_enabled = bss_conf->qos;
2938 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2941 * with wl1271, we don't need to update the
2942 * beacon_int and dtim_period, because the firmware
2943 * updates it by itself when the first beacon is
2944 * received after a join.
2946 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2951 * Get a template for hardware connection maintenance
2953 dev_kfree_skb(wlvif->probereq);
2954 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2957 ieoffset = offsetof(struct ieee80211_mgmt,
2958 u.probe_req.variable);
2959 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2961 /* enable the connection monitoring feature */
2962 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2967 * The join command disables the keep-alive mode, shuts down its
2968 * process, and also clears the template config, so we need to reset it
2969 * all after the join. The acx_aid starts the keep-alive process, and
2970 * the order of the commands below is relevant.
2972 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2976 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2980 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2984 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2985 wlvif->sta.klv_template_id,
2986 ACX_KEEP_ALIVE_TPL_VALID);
2991 * The default fw psm configuration is AUTO, while mac80211 default
2992 * setting is off (ACTIVE), so sync the fw with the correct value.
2994 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3000 wl1271_tx_enabled_rates_get(wl,
3003 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo wlcore_set_assoc(): clear the associated/joined state, free the
 * cached probe-request template, disable connection monitoring and
 * keep-alive, disable beacon filtering and abort any pending CSA.
 */
3011 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3014 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3016 /* make sure we are connected (sta) joined */
3018 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3021 /* make sure we are joined (ibss) */
3023 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3027 /* use defaults when not associated */
3030 /* free probe-request template */
3031 dev_kfree_skb(wlvif->probereq);
3032 wlvif->probereq = NULL;
3034 /* disable connection monitor features */
3035 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3039 /* Disable the keep-alive feature */
3040 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3044 /* disable beacon filtering */
3045 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/* if a channel switch was in progress, abort it and tell mac80211 */
3050 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3051 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3053 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3054 ieee80211_chswitch_done(vif, false);
3055 cancel_delayed_work(&wlvif->channel_switch_work);
3058 /* invalidate keep-alive template */
3059 wl1271_acx_keep_alive_config(wl, wlvif,
3060 wlvif->sta.klv_template_id,
3061 ACX_KEEP_ALIVE_TPL_INVALID);
/* Reset the vif's rate sets to the per-band defaults for its current band. */
3066 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3068 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3069 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track the STA vif's idle state via WLVIF_FLAG_ACTIVE. On entering idle,
 * stop any running sched_scan for this vif first (the firmware only
 * supports sched_scan while idle).
 */
3072 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3075 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/* no transition -> nothing to do */
3077 if (idle == cur_idle)
3081 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3083 /* The current firmware only supports sched_scan in idle */
3084 if (wl->sched_vif == wlvif)
3085 wl->ops->sched_scan_stop(wl, wlvif);
3087 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Apply a mac80211 config change to one vif. Currently only handles TX
 * power updates; p2p management vifs are skipped entirely.
 */
3091 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3092 struct ieee80211_conf *conf, u32 changed)
3096 if (wlcore_is_p2p_mgmt(wlvif))
/* only push the new level to FW when it actually changed */
3099 if (conf->power_level != wlvif->power_level) {
3100 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3104 wlvif->power_level = conf->power_level;
/*
 * mac80211 .config handler: record the global power level and forward the
 * change to every vif, bracketed by a runtime-PM reference and wl->mutex.
 */
3110 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3112 struct wl1271 *wl = hw->priv;
3113 struct wl12xx_vif *wlvif;
3114 struct ieee80211_conf *conf = &hw->conf;
3117 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3119 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3121 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3124 mutex_lock(&wl->mutex);
3126 if (changed & IEEE80211_CONF_CHANGE_POWER)
3127 wl->power_level = conf->power_level;
3129 if (unlikely(wl->state != WLCORE_STATE_ON))
3132 ret = pm_runtime_get_sync(wl->dev);
/* drop the failed get's reference without touching last-busy */
3134 pm_runtime_put_noidle(wl->dev);
3138 /* configure each interface */
3139 wl12xx_for_each_wlvif(wl, wlvif) {
3140 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3146 pm_runtime_mark_last_busy(wl->dev);
3147 pm_runtime_put_autosuspend(wl->dev);
3150 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in .prepare_multicast and consumed in
 * .configure_filter (passed between them as the u64 multicast cookie).
 */
3155 struct wl1271_filter_params {
3158 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 .prepare_multicast handler: snapshot the multicast address
 * list into a freshly allocated wl1271_filter_params and return it as an
 * opaque u64 cookie for .configure_filter. If the list exceeds the
 * hardware table size, filtering is disabled instead of truncated.
 * Called in atomic context, hence GFP_ATOMIC.
 */
3161 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3162 struct netdev_hw_addr_list *mc_list)
3164 struct wl1271_filter_params *fp;
3165 struct netdev_hw_addr *ha;
3167 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3169 wl1271_error("Out of memory setting filters.");
3173 /* update multicast filtering parameters */
3174 fp->mc_list_length = 0;
3175 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3176 fp->enabled = false;
3179 netdev_hw_addr_list_for_each(ha, mc_list) {
3180 memcpy(fp->mc_list[fp->mc_list_length],
3181 ha->addr, ETH_ALEN);
3182 fp->mc_list_length++;
3186 return (u64)(unsigned long)fp;
/* RX filter flags this driver actually honors in .configure_filter */
3189 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3191 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 .configure_filter handler: program the firmware multicast
 * group-address table per vif from the cookie built by
 * wl1271_op_prepare_multicast(). Non-AP vifs use the recorded list (or
 * accept-all under FIF_ALLMULTI); AP vifs disable firmware filtering when
 * FIF_ALLMULTI is set so discovery protocols (e.g. mDNS) work.
 */
3195 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3196 unsigned int changed,
3197 unsigned int *total, u64 multicast)
3199 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3200 struct wl1271 *wl = hw->priv;
3201 struct wl12xx_vif *wlvif;
3205 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3206 " total %x", changed, *total);
3208 mutex_lock(&wl->mutex);
/* drop any filter bits the hardware cannot honor */
3210 *total &= WL1271_SUPPORTED_FILTERS;
3211 changed &= WL1271_SUPPORTED_FILTERS;
3213 if (unlikely(wl->state != WLCORE_STATE_ON))
3216 ret = pm_runtime_get_sync(wl->dev);
3218 pm_runtime_put_noidle(wl->dev);
3222 wl12xx_for_each_wlvif(wl, wlvif) {
3223 if (wlcore_is_p2p_mgmt(wlvif))
3226 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3227 if (*total & FIF_ALLMULTI)
3228 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3232 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3235 fp->mc_list_length);
3241 * If interface in AP mode and created with allmulticast then disable
3242 * the firmware filters so that all multicast packets are passed
3243 * This is mandatory for MDNS based discovery protocols
3245 if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3246 if (*total & FIF_ALLMULTI) {
3247 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3257 * the fw doesn't provide an api to configure the filters. instead,
3258 * the filters configuration is based on the active roles / ROC
3263 pm_runtime_mark_last_busy(wl->dev);
3264 pm_runtime_put_autosuspend(wl->dev);
3267 mutex_unlock(&wl->mutex);
/*
 * Record an AP key set before the AP role is started; the keys are
 * replayed to the firmware later by wl1271_ap_init_hwenc(). Fails on
 * oversized keys, attempts to replace an already-recorded id, or a full
 * recorded_keys table.
 */
3271 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3272 u8 id, u8 key_type, u8 key_size,
3273 const u8 *key, u8 hlid, u32 tx_seq_32,
3274 u16 tx_seq_16, bool is_pairwise)
3276 struct wl1271_ap_key *ap_key;
3279 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3281 if (key_size > MAX_KEY_SIZE)
3285 * Find next free entry in ap_keys. Also check we are not replacing
3288 for (i = 0; i < MAX_NUM_KEYS; i++) {
3289 if (wlvif->ap.recorded_keys[i] == NULL)
3292 if (wlvif->ap.recorded_keys[i]->id == id) {
3293 wl1271_warning("trying to record key replacement");
3298 if (i == MAX_NUM_KEYS)
3301 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
/* stash all parameters needed to issue set_ap_key later */
3306 ap_key->key_type = key_type;
3307 ap_key->key_size = key_size;
3308 memcpy(ap_key->key, key, key_size);
3309 ap_key->hlid = hlid;
3310 ap_key->tx_seq_32 = tx_seq_32;
3311 ap_key->tx_seq_16 = tx_seq_16;
3312 ap_key->is_pairwise = is_pairwise;
3314 wlvif->ap.recorded_keys[i] = ap_key;
/* Free every recorded (pre-AP-start) key and clear the table slots. */
3318 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3322 for (i = 0; i < MAX_NUM_KEYS; i++) {
3323 kfree(wlvif->ap.recorded_keys[i]);
3324 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Replay the keys recorded before AP start into the firmware. Keys with no
 * hlid go to the broadcast link. If any WEP key was pushed, also program
 * the default WEP key index. Recorded keys are always freed on exit.
 */
3328 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3331 struct wl1271_ap_key *key;
3332 bool wep_key_added = false;
3334 for (i = 0; i < MAX_NUM_KEYS; i++) {
3336 if (wlvif->ap.recorded_keys[i] == NULL)
3339 key = wlvif->ap.recorded_keys[i];
/* keys recorded without a station link go to the broadcast hlid */
3341 if (hlid == WL12XX_INVALID_LINK_ID)
3342 hlid = wlvif->ap.bcast_hlid;
3344 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3345 key->id, key->key_type,
3346 key->key_size, key->key,
3347 hlid, key->tx_seq_32,
3348 key->tx_seq_16, key->is_pairwise);
3352 if (key->key_type == KEY_WEP)
3353 wep_key_added = true;
3356 if (wep_key_added) {
3357 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3358 wlvif->ap.bcast_hlid);
3364 wl1271_free_ap_keys(wl, wlvif);
/*
 * Low-level key programming helper shared by AP and STA paths.
 * AP: before the AP role is started, keys are only recorded (replayed by
 * wl1271_ap_init_hwenc()); afterwards they are pushed directly via
 * cmd_set_ap_key. STA: pushes via cmd_set_sta_key, silently ignoring
 * removals the hardware cannot honor (unicast keys, dead hlid).
 */
3368 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3369 u16 action, u8 id, u8 key_type,
3370 u8 key_size, const u8 *key, u32 tx_seq_32,
3371 u16 tx_seq_16, struct ieee80211_sta *sta,
3375 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3378 struct wl1271_station *wl_sta;
/* per-station key -> that station's link; otherwise broadcast link */
3382 wl_sta = (struct wl1271_station *)sta->drv_priv;
3383 hlid = wl_sta->hlid;
3385 hlid = wlvif->ap.bcast_hlid;
3388 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3390 * We do not support removing keys after AP shutdown.
3391 * Pretend we do to make mac80211 happy.
3393 if (action != KEY_ADD_OR_REPLACE)
3396 ret = wl1271_record_ap_key(wl, wlvif, id,
3398 key, hlid, tx_seq_32,
3399 tx_seq_16, is_pairwise);
3401 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3402 id, key_type, key_size,
3403 key, hlid, tx_seq_32,
3404 tx_seq_16, is_pairwise);
3411 static const u8 bcast_addr[ETH_ALEN] = {
3412 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3415 addr = sta ? sta->addr : bcast_addr;
3417 if (is_zero_ether_addr(addr)) {
3418 /* We dont support TX only encryption */
3422 /* The wl1271 does not allow to remove unicast keys - they
3423 will be cleared automatically on next CMD_JOIN. Ignore the
3424 request silently, as we dont want the mac80211 to emit
3425 an error message. */
3426 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3429 /* don't remove key if hlid was already deleted */
3430 if (action == KEY_REMOVE &&
3431 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3434 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3435 id, key_type, key_size,
3436 key, addr, tx_seq_32,
/*
 * mac80211 .set_key handler. GEM/TKIP ciphers change the firmware's spare
 * block accounting, so TX queues are stopped and flushed around the key
 * change to keep queued packets consistent with the new accounting.
 */
3446 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3447 struct ieee80211_vif *vif,
3448 struct ieee80211_sta *sta,
3449 struct ieee80211_key_conf *key_conf)
3451 struct wl1271 *wl = hw->priv;
3453 bool might_change_spare =
3454 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3455 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3457 if (might_change_spare) {
3459 * stop the queues and flush to ensure the next packets are
3460 * in sync with FW spare block accounting
3462 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3463 wl1271_tx_flush(wl);
3466 mutex_lock(&wl->mutex);
3468 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* queues may have been stopped above - wake them on every exit path */
3470 goto out_wake_queues;
3473 ret = pm_runtime_get_sync(wl->dev);
3475 pm_runtime_put_noidle(wl->dev);
3476 goto out_wake_queues;
/* chip-specific set_key (ends up in wlcore_set_key below) */
3479 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3481 pm_runtime_mark_last_busy(wl->dev);
3482 pm_runtime_put_autosuspend(wl->dev);
3485 if (might_change_spare)
3486 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3488 mutex_unlock(&wl->mutex);
/*
 * Common set_key implementation used by the chip-specific hooks: resolve
 * the target hlid, seed the TX sequence counters from the link's freed
 * packet count, map the cipher to a firmware key type, then add/remove
 * the key via wl1271_set_key(). On a unicast/WEP key-type change for a
 * STA vif, the ARP response template is rebuilt to match.
 */
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3494 struct ieee80211_vif *vif,
3495 struct ieee80211_sta *sta,
3496 struct ieee80211_key_conf *key_conf)
3498 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3506 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3508 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3509 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3510 key_conf->cipher, key_conf->keyidx,
3511 key_conf->keylen, key_conf->flags);
3512 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3514 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3516 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3517 hlid = wl_sta->hlid;
3519 hlid = wlvif->ap.bcast_hlid;
3522 hlid = wlvif->sta.hlid;
/* derive TX security sequence number from the link's freed-pkts count */
3524 if (hlid != WL12XX_INVALID_LINK_ID) {
3525 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3526 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3527 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3530 switch (key_conf->cipher) {
3531 case WLAN_CIPHER_SUITE_WEP40:
3532 case WLAN_CIPHER_SUITE_WEP104:
3535 key_conf->hw_key_idx = key_conf->keyidx;
3537 case WLAN_CIPHER_SUITE_TKIP:
3538 key_type = KEY_TKIP;
3539 key_conf->hw_key_idx = key_conf->keyidx;
3541 case WLAN_CIPHER_SUITE_CCMP:
/* firmware generates the IV; mac80211 must leave room for it */
3543 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3545 case WL1271_CIPHER_SUITE_GEM:
3549 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3554 is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3558 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3559 key_conf->keyidx, key_type,
3560 key_conf->keylen, key_conf->key,
3561 tx_seq_32, tx_seq_16, sta, is_pairwise);
3563 wl1271_error("Could not add or replace key");
3568 * reconfiguring arp response if the unicast (or common)
3569 * encryption key type was changed
3571 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3572 (sta || key_type == KEY_WEP) &&
3573 wlvif->encryption_type != key_type) {
3574 wlvif->encryption_type = key_type;
3575 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3577 wl1271_warning("build arp rsp failed: %d", ret);
3584 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3585 key_conf->keyidx, key_type,
3586 key_conf->keylen, key_conf->key,
3587 0, 0, sta, is_pairwise);
3589 wl1271_error("Could not remove key");
3595 wl1271_error("Unsupported key cmd 0x%x", cmd);
3601 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 .set_default_unicast_key handler: remember the default key
 * index and, for WEP, push it to the firmware immediately.
 */
3603 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3604 struct ieee80211_vif *vif,
3607 struct wl1271 *wl = hw->priv;
3608 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3611 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3614 /* we don't handle unsetting of default key */
3618 mutex_lock(&wl->mutex);
3620 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3625 ret = pm_runtime_get_sync(wl->dev);
3627 pm_runtime_put_noidle(wl->dev);
3631 wlvif->default_key = key_idx;
3633 /* the default WEP key needs to be configured at least once */
3634 if (wlvif->encryption_type == KEY_WEP) {
3635 ret = wl12xx_cmd_set_default_wep_key(wl,
3643 pm_runtime_mark_last_busy(wl->dev);
3644 pm_runtime_put_autosuspend(wl->dev);
3647 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain to the firmware (only on chips with
 * the REGDOMAIN_CONF quirk). A failed regdomain command triggers recovery.
 */
void wlcore_regdomain_config(struct wl1271 *wl)
3654 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3657 mutex_lock(&wl->mutex);
3659 if (unlikely(wl->state != WLCORE_STATE_ON))
3662 ret = pm_runtime_get_sync(wl->dev);
/*
 * NOTE(review): every other pm_runtime_get_sync() failure path in this
 * file uses pm_runtime_put_noidle(); this one uses put_autosuspend() -
 * verify whether that is intentional.
 */
3664 pm_runtime_put_autosuspend(wl->dev);
3668 ret = wlcore_cmd_regdomain_config_locked(wl);
3670 wl12xx_queue_recovery_work(wl);
3674 pm_runtime_mark_last_busy(wl->dev);
3675 pm_runtime_put_autosuspend(wl->dev);
3677 mutex_unlock(&wl->mutex);
/*
 * mac80211 .hw_scan handler: start a one-shot scan using the first
 * requested SSID. Refused while any role is in remain-on-channel.
 */
3680 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3681 struct ieee80211_vif *vif,
3682 struct ieee80211_scan_request *hw_req)
3684 struct cfg80211_scan_request *req = &hw_req->req;
3685 struct wl1271 *wl = hw->priv;
3690 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
/* only the first requested SSID is used for the firmware scan */
3693 ssid = req->ssids[0].ssid;
3694 len = req->ssids[0].ssid_len;
3697 mutex_lock(&wl->mutex);
3699 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3701 * We cannot return -EBUSY here because cfg80211 will expect
3702 * a call to ieee80211_scan_completed if we do - in this case
3703 * there won't be any call.
3709 ret = pm_runtime_get_sync(wl->dev);
3711 pm_runtime_put_noidle(wl->dev);
3715 /* fail if there is any role in ROC */
3716 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3717 /* don't allow scanning right now */
3722 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3724 pm_runtime_mark_last_busy(wl->dev);
3725 pm_runtime_put_autosuspend(wl->dev);
3727 mutex_unlock(&wl->mutex);
/*
 * mac80211 .cancel_hw_scan handler: stop an in-progress scan, reset the
 * scan state and report completion (aborted) to mac80211. The completion
 * work is cancelled outside wl->mutex to avoid deadlocking with it.
 */
3732 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3733 struct ieee80211_vif *vif)
3735 struct wl1271 *wl = hw->priv;
3736 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3737 struct cfg80211_scan_info info = {
3742 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3744 mutex_lock(&wl->mutex);
3746 if (unlikely(wl->state != WLCORE_STATE_ON))
3749 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3752 ret = pm_runtime_get_sync(wl->dev);
3754 pm_runtime_put_noidle(wl->dev);
3758 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3759 ret = wl->ops->scan_stop(wl, wlvif);
3765 * Rearm the tx watchdog just before idling scan. This
3766 * prevents just-finished scans from triggering the watchdog
3768 wl12xx_rearm_tx_watchdog_locked(wl);
/* reset scan bookkeeping and notify mac80211 */
3770 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3771 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3772 wl->scan_wlvif = NULL;
3773 wl->scan.req = NULL;
3774 ieee80211_scan_completed(wl->hw, &info);
3777 pm_runtime_mark_last_busy(wl->dev);
3778 pm_runtime_put_autosuspend(wl->dev);
3780 mutex_unlock(&wl->mutex);
/* must run after dropping wl->mutex: the work itself takes the mutex */
3782 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 .sched_scan_start handler: delegate to the chip-specific
 * sched_scan_start op and record which vif owns the scheduled scan.
 */
3785 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3786 struct ieee80211_vif *vif,
3787 struct cfg80211_sched_scan_request *req,
3788 struct ieee80211_scan_ies *ies)
3790 struct wl1271 *wl = hw->priv;
3791 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3794 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3796 mutex_lock(&wl->mutex);
3798 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3803 ret = pm_runtime_get_sync(wl->dev);
3805 pm_runtime_put_noidle(wl->dev);
3809 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
/* remember the owning vif so it can be stopped on state changes */
3813 wl->sched_vif = wlvif;
3816 pm_runtime_mark_last_busy(wl->dev);
3817 pm_runtime_put_autosuspend(wl->dev);
3819 mutex_unlock(&wl->mutex);
/*
 * mac80211 .sched_scan_stop handler: delegate to the chip-specific
 * sched_scan_stop op under wl->mutex with a runtime-PM reference held.
 */
3823 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3824 struct ieee80211_vif *vif)
3826 struct wl1271 *wl = hw->priv;
3827 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3830 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3832 mutex_lock(&wl->mutex);
3834 if (unlikely(wl->state != WLCORE_STATE_ON))
3837 ret = pm_runtime_get_sync(wl->dev);
3839 pm_runtime_put_noidle(wl->dev);
3843 wl->ops->sched_scan_stop(wl, wlvif);
3845 pm_runtime_mark_last_busy(wl->dev);
3846 pm_runtime_put_autosuspend(wl->dev);
3848 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_frag_threshold handler: forward the new fragmentation
 * threshold to the firmware via ACX.
 */
3853 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3855 struct wl1271 *wl = hw->priv;
3858 mutex_lock(&wl->mutex);
3860 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3865 ret = pm_runtime_get_sync(wl->dev);
3867 pm_runtime_put_noidle(wl->dev);
3871 ret = wl1271_acx_frag_threshold(wl, value);
3873 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3875 pm_runtime_mark_last_busy(wl->dev);
3876 pm_runtime_put_autosuspend(wl->dev);
3879 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_rts_threshold handler: apply the new RTS threshold to
 * every vif (the firmware setting is per-role).
 */
3884 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3886 struct wl1271 *wl = hw->priv;
3887 struct wl12xx_vif *wlvif;
3890 mutex_lock(&wl->mutex);
3892 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3897 ret = pm_runtime_get_sync(wl->dev);
3899 pm_runtime_put_noidle(wl->dev);
3903 wl12xx_for_each_wlvif(wl, wlvif) {
3904 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3906 wl1271_warning("set rts threshold failed: %d", ret);
3908 pm_runtime_mark_last_busy(wl->dev);
3909 pm_runtime_put_autosuspend(wl->dev);
3912 mutex_unlock(&wl->mutex);
/*
 * Strip the first IE with id @eid from @skb (searching from @ieoffset) by
 * shifting the remaining bytes down and trimming the skb.
 */
3917 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3920 const u8 *next, *end = skb->data + skb->len;
3921 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3922 skb->len - ieoffset);
/* close the gap left by the removed IE, then shrink the skb */
3927 memmove(ie, next, end - next);
3928 skb_trim(skb, skb->len - len);
/*
 * Like wl12xx_remove_ie() but matches a vendor-specific IE by OUI and
 * OUI type instead of element id.
 */
3931 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3932 unsigned int oui, u8 oui_type,
3936 const u8 *next, *end = skb->data + skb->len;
3937 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3938 skb->data + ieoffset,
3939 skb->len - ieoffset);
/* close the gap left by the removed IE, then shrink the skb */
3944 memmove(ie, next, end - next);
3945 skb_trim(skb, skb->len - len);
/*
 * Upload the probe-response template obtained from mac80211 for an AP vif
 * and mark it as explicitly set (so beacon-derived templates are skipped).
 */
3948 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3949 struct ieee80211_vif *vif)
3951 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3952 struct sk_buff *skb;
3955 skb = ieee80211_proberesp_get(wl->hw, vif);
3959 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3960 CMD_TEMPL_AP_PROBE_RESPONSE,
3969 wl1271_debug(DEBUG_AP, "probe response updated");
3970 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy probe-response template upload: if the vif has no cached SSID,
 * splice the SSID from bss_conf into the supplied probe-response data
 * (replacing its SSID IE in a stack buffer) before uploading.
 */
3976 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3977 struct ieee80211_vif *vif,
3979 size_t probe_rsp_len,
3982 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3983 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3984 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3985 int ssid_ie_offset, ie_offset, templ_len;
3988 /* no need to change probe response if the SSID is set correctly */
3989 if (wlvif->ssid_len > 0)
3990 return wl1271_cmd_template_set(wl, wlvif->role_id,
3991 CMD_TEMPL_AP_PROBE_RESPONSE,
/* worst case: frame grows by the bss_conf SSID - check it still fits */
3996 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3997 wl1271_error("probe_rsp template too big");
4001 /* start searching from IE offset */
4002 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4004 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4005 probe_rsp_len - ie_offset);
4007 wl1271_error("No SSID in beacon!");
4011 ssid_ie_offset = ptr - probe_rsp_data;
/* advance past the original SSID IE (header + payload) */
4012 ptr += (ptr[1] + 2);
/* copy everything before the SSID IE unchanged */
4014 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4016 /* insert SSID from bss_conf */
4017 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4018 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4019 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4020 bss_conf->ssid, bss_conf->ssid_len);
4021 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* append the rest of the original frame after the new SSID IE */
4023 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4024 ptr, probe_rsp_len - (ptr - probe_rsp_data));
4025 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4027 return wl1271_cmd_template_set(wl, wlvif->role_id,
4028 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_conf changes: slot time, preamble length and
 * CTS protection, each pushed to the firmware only when flagged in
 * @changed.
 */
4034 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4035 struct ieee80211_vif *vif,
4036 struct ieee80211_bss_conf *bss_conf,
4039 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4042 if (changed & BSS_CHANGED_ERP_SLOT) {
4043 if (bss_conf->use_short_slot)
4044 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4046 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4048 wl1271_warning("Set slot time failed %d", ret);
4053 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4054 if (bss_conf->use_short_preamble)
4055 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4057 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4060 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4061 if (bss_conf->use_cts_prot)
4062 ret = wl1271_acx_cts_protect(wl, wlvif,
4065 ret = wl1271_acx_cts_protect(wl, wlvif,
4066 CTSPROTECT_DISABLE);
4068 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Upload the current beacon from mac80211 as the beacon template and,
 * unless a probe response was already set explicitly by userspace, derive
 * a probe-response template from it (strip TIM and P2P IEs, rewrite the
 * frame control to PROBE_RESP).
 */
4077 static int wlcore_set_beacon_template(struct wl1271 *wl,
4078 struct ieee80211_vif *vif,
4081 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4082 struct ieee80211_hdr *hdr;
4085 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4086 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4094 wl1271_debug(DEBUG_MASTER, "beacon updated");
4096 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4098 dev_kfree_skb(beacon);
4101 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4102 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4104 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4109 dev_kfree_skb(beacon);
/* WMM is considered enabled iff the beacon carries the WMM vendor IE */
4113 wlvif->wmm_enabled =
4114 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4115 WLAN_OUI_TYPE_MICROSOFT_WMM,
4116 beacon->data + ieoffset,
4117 beacon->len - ieoffset);
4120 * In case we already have a probe-resp beacon set explicitly
4121 * by usermode, don't use the beacon data.
4123 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4126 /* remove TIM ie from probe response */
4127 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4130 * remove p2p ie from probe response.
4131 * the fw responds to probe requests that don't include
4132 * the p2p ie. probe requests with p2p ie will be passed,
4133 * and will be responded by the supplicant (the spec
4134 * forbids including the p2p ie when responding to probe
4135 * requests that didn't include it).
4137 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4138 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* reuse the beacon frame as a probe response by rewriting its type */
4140 hdr = (struct ieee80211_hdr *) beacon->data;
4141 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4142 IEEE80211_STYPE_PROBE_RESP);
4144 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4149 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4150 CMD_TEMPL_PROBE_RESPONSE,
4155 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_conf changes: beacon interval, AP probe
 * response template and beacon template. Restarts the DFS master state
 * when beaconing had been disabled and a new beacon is set.
 */
4163 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4164 struct ieee80211_vif *vif,
4165 struct ieee80211_bss_conf *bss_conf,
4168 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4169 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4172 if (changed & BSS_CHANGED_BEACON_INT) {
4173 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4174 bss_conf->beacon_int);
4176 wlvif->beacon_int = bss_conf->beacon_int;
4179 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4180 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4182 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4185 if (changed & BSS_CHANGED_BEACON) {
4186 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4190 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4192 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4199 wl1271_error("beacon info change failed: %d", ret);
4203 /* AP mode changes */
/*
 * Apply bss_conf changes for an AP vif: basic rates (and the dependent
 * templates), beacon info, beacon enable/disable (starting or stopping
 * the AP role), ERP parameters and HT operation mode.
 */
4204 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4205 struct ieee80211_vif *vif,
4206 struct ieee80211_bss_conf *bss_conf,
4209 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4212 if (changed & BSS_CHANGED_BASIC_RATES) {
4213 u32 rates = bss_conf->basic_rates;
4215 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4217 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4218 wlvif->basic_rate_set);
/* rate set changed -> rebuild rate policies and templates */
4220 ret = wl1271_init_ap_rates(wl, wlvif);
4222 wl1271_error("AP rate policy change failed %d", ret);
4226 ret = wl1271_ap_init_templates(wl, vif);
4230 /* No need to set probe resp template for mesh */
4231 if (!ieee80211_vif_is_mesh(vif)) {
4232 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4239 ret = wlcore_set_beacon_template(wl, vif, true);
4244 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4248 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4249 if (bss_conf->enable_beacon) {
4250 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4251 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* replay keys recorded before the AP role existed */
4255 ret = wl1271_ap_init_hwenc(wl, wlvif);
4259 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4260 wl1271_debug(DEBUG_AP, "started AP");
4263 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4265 * AP might be in ROC in case we have just
4266 * sent auth reply. handle it.
4268 if (test_bit(wlvif->role_id, wl->roc_map))
4269 wl12xx_croc(wl, wlvif->role_id);
4271 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4275 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4276 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4278 wl1271_debug(DEBUG_AP, "stopped AP");
4283 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4287 /* Handle HT information change */
4288 if ((changed & BSS_CHANGED_HT) &&
4289 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4290 ret = wl1271_acx_set_ht_information(wl, wlvif,
4291 bss_conf->ht_operation_mode);
4293 wl1271_warning("Set ht information failed %d", ret);
/*
 * Program a newly learned BSSID into the vif: record beacon interval and
 * basic rates, stop any scheduled scan on this vif, push rate policies and
 * (QoS) null-data templates to FW, set the SSID and mark the vif in use.
 * Returns 0 on success or a negative error code (error paths elided here).
 */
4302 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4303 struct ieee80211_bss_conf *bss_conf,
4309 wl1271_debug(DEBUG_MAC80211,
4310 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4311 bss_conf->bssid, bss_conf->aid,
4312 bss_conf->beacon_int,
4313 bss_conf->basic_rates, sta_rate_set);
4315 wlvif->beacon_int = bss_conf->beacon_int;
4316 rates = bss_conf->basic_rates;
4317 wlvif->basic_rate_set =
4318 wl1271_tx_enabled_rates_get(wl, rates,
4321 wl1271_tx_min_rate_get(wl,
4322 wlvif->basic_rate_set);
4326 wl1271_tx_enabled_rates_get(wl,
4330 /* we only support sched_scan while not connected */
4331 if (wl->sched_vif == wlvif)
4332 wl->ops->sched_scan_stop(wl, wlvif);
4334 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/* Refresh the null-data templates used for keepalive/PS signalling. */
4338 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4342 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4346 wlcore_set_ssid(wl, wlvif);
4348 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Undo wlcore_set_bssid(): fall back to the band's minimum rates, reprogram
 * rate policies, stop the STA role in FW (STA vifs only) and clear the
 * in-use flag. Returns 0 or a negative error code (error paths elided).
 */
4353 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4357 /* revert back to minimum rates for the current band */
4358 wl1271_set_band_rate(wl, wlvif);
4359 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4361 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4365 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4366 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4367 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4372 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4375 /* STA/IBSS mode changes */
/*
 * Apply mac80211 BSS config changes for a STA/IBSS vif: IBSS join state,
 * idle handling, CQM thresholds, BSSID set/clear, association state, PS
 * mode, HT capabilities/information and ARP filtering.
 * Called with wl->mutex held, from wl1271_op_bss_info_changed().
 * NOTE: this excerpt elides intermediate error-handling lines.
 */
4376 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4377 struct ieee80211_vif *vif,
4378 struct ieee80211_bss_conf *bss_conf,
4381 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4382 bool do_join = false;
4383 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4384 bool ibss_joined = false;
4385 u32 sta_rate_set = 0;
4387 struct ieee80211_sta *sta;
4388 bool sta_exists = false;
4389 struct ieee80211_sta_ht_cap sta_ht_cap;
4392 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4398 if (changed & BSS_CHANGED_IBSS) {
4399 if (bss_conf->ibss_joined) {
4400 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
/* IBSS left: drop association state and stop the STA role. */
4403 wlcore_unset_assoc(wl, wlvif);
4404 wl12xx_cmd_role_stop_sta(wl, wlvif);
4408 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4411 /* Need to update the SSID (for filtering etc) */
4412 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4415 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4416 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4417 bss_conf->enable_beacon ? "enabled" : "disabled");
4422 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4423 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
/* CQM: program the RSSI/SNR trigger; enabled iff a threshold is set. */
4425 if (changed & BSS_CHANGED_CQM) {
4426 bool enable = false;
4427 if (bss_conf->cqm_rssi_thold)
4429 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4430 bss_conf->cqm_rssi_thold,
4431 bss_conf->cqm_rssi_hyst);
4434 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
/* Snapshot the AP's supported rates and HT caps under RCU for later use. */
4437 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4438 BSS_CHANGED_ASSOC)) {
4440 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4442 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4444 /* save the supp_rates of the ap */
4445 sta_rate_set = sta->supp_rates[wlvif->band];
4446 if (sta->ht_cap.ht_supported)
4448 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4449 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4450 sta_ht_cap = sta->ht_cap;
4457 if (changed & BSS_CHANGED_BSSID) {
4458 if (!is_zero_ether_addr(bss_conf->bssid)) {
4459 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4464 /* Need to update the BSSID (for filtering etc) */
4467 ret = wlcore_clear_bssid(wl, wlvif);
4473 if (changed & BSS_CHANGED_IBSS) {
4474 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4475 bss_conf->ibss_joined);
4477 if (bss_conf->ibss_joined) {
4478 u32 rates = bss_conf->basic_rates;
4479 wlvif->basic_rate_set =
4480 wl1271_tx_enabled_rates_get(wl, rates,
4483 wl1271_tx_min_rate_get(wl,
4484 wlvif->basic_rate_set);
4486 /* by default, use 11b + OFDM rates */
4487 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4488 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4494 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4495 /* enable beacon filtering */
4496 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4501 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4506 ret = wlcore_join(wl, wlvif);
4508 wl1271_warning("cmd join failed %d", ret);
4513 if (changed & BSS_CHANGED_ASSOC) {
4514 if (bss_conf->assoc) {
4515 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4520 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4521 wl12xx_set_authorized(wl, wlvif);
4523 wlcore_unset_assoc(wl, wlvif);
/* PS: enter forced or auto PS when associated and not already in PS;
 * leave PS when mac80211 clears bss_conf->ps. */
4527 if (changed & BSS_CHANGED_PS) {
4528 if ((bss_conf->ps) &&
4529 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4530 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4534 if (wl->conf.conn.forced_ps) {
4535 ps_mode = STATION_POWER_SAVE_MODE;
4536 ps_mode_str = "forced";
4538 ps_mode = STATION_AUTO_PS_MODE;
4539 ps_mode_str = "auto";
4542 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4544 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4546 wl1271_warning("enter %s ps failed %d",
4548 } else if (!bss_conf->ps &&
4549 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4550 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4552 ret = wl1271_ps_set_mode(wl, wlvif,
4553 STATION_ACTIVE_MODE);
4555 wl1271_warning("exit auto ps failed %d", ret);
4559 /* Handle new association with HT. Do this after join. */
4562 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4564 ret = wlcore_hw_set_peer_cap(wl,
4570 wl1271_warning("Set ht cap failed %d", ret);
4576 ret = wl1271_acx_set_ht_information(wl, wlvif,
4577 bss_conf->ht_operation_mode);
4579 wl1271_warning("Set ht information failed %d",
4586 /* Handle arp filtering. Done after join. */
4587 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4588 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4589 __be32 addr = bss_conf->arp_addr_list[0];
4590 wlvif->sta.qos = bss_conf->qos;
4591 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4593 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4594 wlvif->ip_addr = addr;
4596 * The template should have been configured only upon
4597 * association. however, it seems that the correct ip
4598 * isn't being set (when sending), so we have to
4599 * reconfigure the template upon every ip change.
4601 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4603 wl1271_warning("build arp rsp failed: %d", ret);
4607 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4608 (ACX_ARP_FILTER_ARP_FILTERING |
4609 ACX_ARP_FILTER_AUTO_ARP),
/* No single IP / not associated: disable ARP filtering in FW. */
4613 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 bss_info_changed callback. Cancels a pending connection-loss
 * work when association state changes (STA), flushes TX before disabling
 * beacons (AP), handles TX power, then dispatches to the AP or STA/IBSS
 * handler under wl->mutex with the device runtime-PM-active.
 */
4624 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4625 struct ieee80211_vif *vif,
4626 struct ieee80211_bss_conf *bss_conf,
4629 struct wl1271 *wl = hw->priv;
4630 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4631 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4634 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4635 wlvif->role_id, (int)changed);
4638 * make sure to cancel pending disconnections if our association
4641 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4642 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* Flush queued frames before the AP stops beaconing. */
4644 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4645 !bss_conf->enable_beacon)
4646 wl1271_tx_flush(wl);
4648 mutex_lock(&wl->mutex);
4650 if (unlikely(wl->state != WLCORE_STATE_ON))
4653 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4656 ret = pm_runtime_get_sync(wl->dev);
4658 pm_runtime_put_noidle(wl->dev);
/* Only touch FW TX power when the requested level actually changed. */
4662 if ((changed & BSS_CHANGED_TXPOWER) &&
4663 bss_conf->txpower != wlvif->power_level) {
4665 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4669 wlvif->power_level = bss_conf->txpower;
4673 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4675 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4677 pm_runtime_mark_last_busy(wl->dev);
4678 pm_runtime_put_autosuspend(wl->dev);
4681 mutex_unlock(&wl->mutex);
/*
 * mac80211 add_chanctx callback. The hardware needs no per-context setup,
 * so this only logs the new channel context and reports success.
 */
4684 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4685 struct ieee80211_chanctx_conf *ctx)
4687 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4688 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4689 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 remove_chanctx callback. No per-context teardown is required;
 * just log the removed channel context.
 */
4693 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4694 struct ieee80211_chanctx_conf *ctx)
4696 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4697 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4698 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 change_chanctx callback. Walks every vif bound to this channel
 * context and, for AP vifs on a usable DFS channel whose radar flag was
 * just enabled, starts CAC (radar detection) in the FW.
 * Runs under wl->mutex with the device runtime-PM-active.
 */
4701 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4702 struct ieee80211_chanctx_conf *ctx,
4705 struct wl1271 *wl = hw->priv;
4706 struct wl12xx_vif *wlvif;
4708 int channel = ieee80211_frequency_to_channel(
4709 ctx->def.chan->center_freq);
4711 wl1271_debug(DEBUG_MAC80211,
4712 "mac80211 change chanctx %d (type %d) changed 0x%x",
4713 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4715 mutex_lock(&wl->mutex);
4717 ret = pm_runtime_get_sync(wl->dev);
4719 pm_runtime_put_noidle(wl->dev);
4723 wl12xx_for_each_wlvif(wl, wlvif) {
4724 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* Skip vifs that are assigned to a different channel context. */
4727 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4733 /* start radar if needed */
4734 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4735 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4736 ctx->radar_enabled && !wlvif->radar_enabled &&
4737 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4738 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4739 wlcore_hw_set_cac(wl, wlvif, true);
4740 wlvif->radar_enabled = true;
4744 pm_runtime_mark_last_busy(wl->dev);
4745 pm_runtime_put_autosuspend(wl->dev);
4747 mutex_unlock(&wl->mutex);
/*
 * mac80211 assign_vif_chanctx callback. Records the context's band,
 * channel and channel type on the vif, refreshes the default band rates,
 * and starts CAC if radar detection is requested on a usable DFS channel.
 * Runs under wl->mutex with the device runtime-PM-active.
 */
4750 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4751 struct ieee80211_vif *vif,
4752 struct ieee80211_chanctx_conf *ctx)
4754 struct wl1271 *wl = hw->priv;
4755 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4756 int channel = ieee80211_frequency_to_channel(
4757 ctx->def.chan->center_freq);
4760 wl1271_debug(DEBUG_MAC80211,
4761 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4762 wlvif->role_id, channel,
4763 cfg80211_get_chandef_type(&ctx->def),
4764 ctx->radar_enabled, ctx->def.chan->dfs_state);
4766 mutex_lock(&wl->mutex);
4768 if (unlikely(wl->state != WLCORE_STATE_ON))
4771 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4774 ret = pm_runtime_get_sync(wl->dev);
4776 pm_runtime_put_noidle(wl->dev);
4780 wlvif->band = ctx->def.chan->band;
4781 wlvif->channel = channel;
4782 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4784 /* update default rates according to the band */
4785 wl1271_set_band_rate(wl, wlvif);
4787 if (ctx->radar_enabled &&
4788 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4789 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4790 wlcore_hw_set_cac(wl, wlvif, true);
4791 wlvif->radar_enabled = true;
4794 pm_runtime_mark_last_busy(wl->dev);
4795 pm_runtime_put_autosuspend(wl->dev);
4797 mutex_unlock(&wl->mutex);
/*
 * mac80211 unassign_vif_chanctx callback. Flushes TX, then stops radar
 * detection (CAC) in FW if it was running on this vif.
 * Runs under wl->mutex with the device runtime-PM-active.
 */
4802 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4803 struct ieee80211_vif *vif,
4804 struct ieee80211_chanctx_conf *ctx)
4806 struct wl1271 *wl = hw->priv;
4807 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4810 wl1271_debug(DEBUG_MAC80211,
4811 "mac80211 unassign chanctx (role %d) %d (type %d)",
4813 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4814 cfg80211_get_chandef_type(&ctx->def));
/* Drain queued frames before detaching from the channel. */
4816 wl1271_tx_flush(wl);
4818 mutex_lock(&wl->mutex);
4820 if (unlikely(wl->state != WLCORE_STATE_ON))
4823 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4826 ret = pm_runtime_get_sync(wl->dev);
4828 pm_runtime_put_noidle(wl->dev);
4832 if (wlvif->radar_enabled) {
4833 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4834 wlcore_hw_set_cac(wl, wlvif, false);
4835 wlvif->radar_enabled = false;
4838 pm_runtime_mark_last_busy(wl->dev);
4839 pm_runtime_put_autosuspend(wl->dev);
4841 mutex_unlock(&wl->mutex);
/*
 * Move a single AP vif to a new channel context: stop CAC if running,
 * update band/channel/channel_type from the new context, and restart CAC
 * if the new context has radar enabled. AP-only (WARNs otherwise), and
 * expects the vif's beaconing to be disabled during the switch.
 */
4844 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4845 struct wl12xx_vif *wlvif,
4846 struct ieee80211_chanctx_conf *new_ctx)
4848 int channel = ieee80211_frequency_to_channel(
4849 new_ctx->def.chan->center_freq);
4851 wl1271_debug(DEBUG_MAC80211,
4852 "switch vif (role %d) %d -> %d chan_type: %d",
4853 wlvif->role_id, wlvif->channel, channel,
4854 cfg80211_get_chandef_type(&new_ctx->def));
4856 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4859 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4861 if (wlvif->radar_enabled) {
4862 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4863 wlcore_hw_set_cac(wl, wlvif, false);
4864 wlvif->radar_enabled = false;
4867 wlvif->band = new_ctx->def.chan->band;
4868 wlvif->channel = channel;
4869 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4871 /* start radar if needed */
4872 if (new_ctx->radar_enabled) {
4873 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4874 wlcore_hw_set_cac(wl, wlvif, true);
4875 wlvif->radar_enabled = true;
/*
 * mac80211 switch_vif_chanctx callback. Applies __wlcore_switch_vif_chan()
 * to each vif in the switch request, under wl->mutex with the device
 * runtime-PM-active.
 */
4882 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4883 struct ieee80211_vif_chanctx_switch *vifs,
4885 enum ieee80211_chanctx_switch_mode mode)
4887 struct wl1271 *wl = hw->priv;
4890 wl1271_debug(DEBUG_MAC80211,
4891 "mac80211 switch chanctx n_vifs %d mode %d",
4894 mutex_lock(&wl->mutex);
4896 ret = pm_runtime_get_sync(wl->dev);
4898 pm_runtime_put_noidle(wl->dev);
4902 for (i = 0; i < n_vifs; i++) {
4903 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4905 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4910 pm_runtime_mark_last_busy(wl->dev);
4911 pm_runtime_put_autosuspend(wl->dev);
4913 mutex_unlock(&wl->mutex);
/*
 * mac80211 conf_tx callback: program EDCA AC parameters (cw_min/max, aifs,
 * txop) and the per-TID config for one queue. P2P-management vifs are
 * skipped. Runs under wl->mutex with the device runtime-PM-active.
 */
4918 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4919 struct ieee80211_vif *vif, u16 queue,
4920 const struct ieee80211_tx_queue_params *params)
4922 struct wl1271 *wl = hw->priv;
4923 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4927 if (wlcore_is_p2p_mgmt(wlvif))
4930 mutex_lock(&wl->mutex);
4932 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
/* Choose UAPSD trigger vs. legacy PS scheme for this AC. */
4935 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4937 ps_scheme = CONF_PS_SCHEME_LEGACY;
4939 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4942 ret = pm_runtime_get_sync(wl->dev);
4944 pm_runtime_put_noidle(wl->dev);
4949 * the txop is confed in units of 32us by the mac80211,
/* hence the << 5 conversion below. */
4952 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4953 params->cw_min, params->cw_max,
4954 params->aifs, params->txop << 5);
4958 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4959 CONF_CHANNEL_TYPE_EDCF,
4960 wl1271_tx_get_queue(queue),
4961 ps_scheme, CONF_ACK_POLICY_LEGACY,
4965 pm_runtime_mark_last_busy(wl->dev);
4966 pm_runtime_put_autosuspend(wl->dev);
4969 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf callback: query the FW TSF via ACX and return it.
 * Returns ULLONG_MAX if the query cannot be performed (error paths
 * elided in this excerpt).
 */
4974 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4975 struct ieee80211_vif *vif)
4978 struct wl1271 *wl = hw->priv;
4979 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4980 u64 mactime = ULLONG_MAX;
4983 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4985 mutex_lock(&wl->mutex);
4987 if (unlikely(wl->state != WLCORE_STATE_ON))
4990 ret = pm_runtime_get_sync(wl->dev);
4992 pm_runtime_put_noidle(wl->dev);
4996 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5001 pm_runtime_mark_last_busy(wl->dev);
5002 pm_runtime_put_autosuspend(wl->dev);
5005 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_survey callback: report only the current configured
 * channel (no per-channel statistics are collected).
 */
5009 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5010 struct survey_info *survey)
5012 struct ieee80211_conf *conf = &hw->conf;
5017 survey->channel = conf->chandef.chan;
/*
 * Allocate a FW link (HLID) for a new AP-mode station. Fails when the AP
 * station limit is reached or no link is available. On success restores
 * the station's saved TX security sequence counter (recovery/resume),
 * records the HLID in the vif's map and bumps the active-station count.
 */
5022 static int wl1271_allocate_sta(struct wl1271 *wl,
5023 struct wl12xx_vif *wlvif,
5024 struct ieee80211_sta *sta)
5026 struct wl1271_station *wl_sta;
5030 if (wl->active_sta_count >= wl->max_ap_stations) {
5031 wl1271_warning("could not allocate HLID - too much stations");
5035 wl_sta = (struct wl1271_station *)sta->drv_priv;
5036 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5038 wl1271_warning("could not allocate HLID - too many links");
5042 /* use the previous security seq, if this is a recovery/resume */
5043 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5045 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5046 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5047 wl->active_sta_count++;
/*
 * Release an AP-mode station's FW link (HLID): clear it from the vif and
 * the AP PS maps, save its last security sequence counter for a later
 * recovery/resume, free the link and drop the active-station count.
 * Rearms the TX watchdog when the last station goes away.
 */
5051 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5053 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5056 clear_bit(hlid, wlvif->ap.sta_hlid_map);
5057 __clear_bit(hlid, &wl->ap_ps_map);
5058 __clear_bit(hlid, &wl->ap_fw_ps_map);
5061 * save the last used PN in the private part of iee80211_sta,
5062 * in case of recovery/suspend
5064 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5066 wl12xx_free_link(wl, wlvif, &hlid);
5067 wl->active_sta_count--;
5070 * rearm the tx watchdog when the last STA is freed - give the FW a
5071 * chance to return STA-buffered packets before complaining.
5073 if (wl->active_sta_count == 0)
5074 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add a station in AP mode: allocate a link (HLID) and register the peer
 * with FW. On cmd_add_peer failure the HLID is released again.
 */
5077 static int wl12xx_sta_add(struct wl1271 *wl,
5078 struct wl12xx_vif *wlvif,
5079 struct ieee80211_sta *sta)
5081 struct wl1271_station *wl_sta;
5085 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5087 ret = wl1271_allocate_sta(wl, wlvif, sta);
5091 wl_sta = (struct wl1271_station *)sta->drv_priv;
5092 hlid = wl_sta->hlid;
5094 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5096 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove a station in AP mode: tell FW to drop the peer, then free its
 * link (HLID). WARNs if the station's HLID is not in the vif's map.
 */
5101 static int wl12xx_sta_remove(struct wl1271 *wl,
5102 struct wl12xx_vif *wlvif,
5103 struct ieee80211_sta *sta)
5105 struct wl1271_station *wl_sta;
5108 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5110 wl_sta = (struct wl1271_station *)sta->drv_priv;
5112 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5115 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5119 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel on this vif's role, but only if no role is
 * currently in ROC (roc_map empty) and the vif has a valid role id.
 */
5123 static void wlcore_roc_if_possible(struct wl1271 *wl,
5124 struct wl12xx_vif *wlvif)
5126 if (find_first_bit(wl->roc_map,
5127 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5130 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5133 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5137 * when wl_sta is NULL, we treat this call as if coming from a
5138 * pending auth reply.
5139 * wl->mutex must be taken and the FW must be awake when the call
/*
 * Track stations that are mid-connection (between add and authorize) and
 * keep a ROC active on the AP role while any such station (or a pending
 * auth reply) exists; release the ROC when the last one completes.
 */
5142 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5143 struct wl1271_station *wl_sta, bool in_conn)
5146 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* First in-connection entity on this vif: grab a ROC if none is active. */
5149 if (!wlvif->ap_pending_auth_reply &&
5150 !wlvif->inconn_count)
5151 wlcore_roc_if_possible(wl, wlvif);
5154 wl_sta->in_connection = true;
5155 wlvif->inconn_count++;
5157 wlvif->ap_pending_auth_reply = true;
5160 if (wl_sta && !wl_sta->in_connection)
5163 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5166 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5170 wl_sta->in_connection = false;
5171 wlvif->inconn_count--;
5173 wlvif->ap_pending_auth_reply = false;
/* Nothing left mid-connection: cancel the ROC on this role, if any. */
5176 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5177 test_bit(wlvif->role_id, wl->roc_map))
5178 wl12xx_croc(wl, wlvif->role_id);
/*
 * Core of the sta_state callback: translate mac80211 station state
 * transitions into FW operations for both AP and STA roles — add/remove
 * peer, authorize/deauthorize, save/restore TX security counters across
 * disassoc/assoc, and ROC bookkeeping around authentication.
 * NOTE: the role/state guard expressions on several branches are elided
 * in this excerpt.
 */
5182 static int wl12xx_update_sta_state(struct wl1271 *wl,
5183 struct wl12xx_vif *wlvif,
5184 struct ieee80211_sta *sta,
5185 enum ieee80211_sta_state old_state,
5186 enum ieee80211_sta_state new_state)
5188 struct wl1271_station *wl_sta;
5189 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5190 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5193 wl_sta = (struct wl1271_station *)sta->drv_priv;
5195 /* Add station (AP mode) */
5197 old_state == IEEE80211_STA_NOTEXIST &&
5198 new_state == IEEE80211_STA_NONE) {
5199 ret = wl12xx_sta_add(wl, wlvif, sta);
5203 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5206 /* Remove station (AP mode) */
5208 old_state == IEEE80211_STA_NONE &&
5209 new_state == IEEE80211_STA_NOTEXIST) {
5211 wl12xx_sta_remove(wl, wlvif, sta);
5213 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5216 /* Authorize station (AP mode) */
5218 new_state == IEEE80211_STA_AUTHORIZED) {
5219 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5223 /* reconfigure rates */
5224 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5228 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5233 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5236 /* Authorize station */
5238 new_state == IEEE80211_STA_AUTHORIZED) {
5239 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5240 ret = wl12xx_set_authorized(wl, wlvif);
5246 old_state == IEEE80211_STA_AUTHORIZED &&
5247 new_state == IEEE80211_STA_ASSOC) {
5248 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5249 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5252 /* save seq number on disassoc (suspend) */
5254 old_state == IEEE80211_STA_ASSOC &&
5255 new_state == IEEE80211_STA_AUTH) {
5256 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5257 wlvif->total_freed_pkts = 0;
5260 /* restore seq number on assoc (resume) */
5262 old_state == IEEE80211_STA_AUTH &&
5263 new_state == IEEE80211_STA_ASSOC) {
5264 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5267 /* clear ROCs on failure or authorization */
5269 (new_state == IEEE80211_STA_AUTHORIZED ||
5270 new_state == IEEE80211_STA_NOTEXIST)) {
5271 if (test_bit(wlvif->role_id, wl->roc_map))
5272 wl12xx_croc(wl, wlvif->role_id);
/* New station appearing: take a ROC if none is active on any role. */
5276 old_state == IEEE80211_STA_NOTEXIST &&
5277 new_state == IEEE80211_STA_NONE) {
5278 if (find_first_bit(wl->roc_map,
5279 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5280 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5281 wl12xx_roc(wl, wlvif, wlvif->role_id,
5282 wlvif->band, wlvif->channel);
/*
 * mac80211 sta_state callback: wraps wl12xx_update_sta_state() with
 * wl->mutex and runtime PM. Downward transitions are not propagated as
 * errors to mac80211 (see the final new_state < old_state check).
 */
5288 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5289 struct ieee80211_vif *vif,
5290 struct ieee80211_sta *sta,
5291 enum ieee80211_sta_state old_state,
5292 enum ieee80211_sta_state new_state)
5294 struct wl1271 *wl = hw->priv;
5295 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5298 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5299 sta->aid, old_state, new_state);
5301 mutex_lock(&wl->mutex);
5303 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5308 ret = pm_runtime_get_sync(wl->dev);
5310 pm_runtime_put_noidle(wl->dev);
5314 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5316 pm_runtime_mark_last_busy(wl->dev);
5317 pm_runtime_put_autosuspend(wl->dev);
5319 mutex_unlock(&wl->mutex);
5320 if (new_state < old_state)
/*
 * mac80211 ampdu_action callback. RX BA sessions are started/stopped in
 * FW (bounded by ba_rx_session_count_max and tracked per-link in
 * ba_bitmap); TX BA sessions are managed autonomously by FW, so all TX
 * actions are accepted without further work.
 */
5325 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5326 struct ieee80211_vif *vif,
5327 struct ieee80211_ampdu_params *params)
5329 struct wl1271 *wl = hw->priv;
5330 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5332 u8 hlid, *ba_bitmap;
5333 struct ieee80211_sta *sta = params->sta;
5334 enum ieee80211_ampdu_mlme_action action = params->action;
5335 u16 tid = params->tid;
5336 u16 *ssn = ¶ms->ssn;
5338 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5341 /* sanity check - the fields in FW are only 8bits wide */
5342 if (WARN_ON(tid > 0xFF))
5345 mutex_lock(&wl->mutex);
5347 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* Resolve the FW link id for this session: own hlid for STA,
 * the peer station's hlid for AP. */
5352 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5353 hlid = wlvif->sta.hlid;
5354 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5355 struct wl1271_station *wl_sta;
5357 wl_sta = (struct wl1271_station *)sta->drv_priv;
5358 hlid = wl_sta->hlid;
5364 ba_bitmap = &wl->links[hlid].ba_bitmap;
5366 ret = pm_runtime_get_sync(wl->dev);
5368 pm_runtime_put_noidle(wl->dev);
5372 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5376 case IEEE80211_AMPDU_RX_START:
5377 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5382 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5384 wl1271_debug(DEBUG_RX, "exceeded max RX BA sessions");
5388 if (*ba_bitmap & BIT(tid)) {
5390 wl1271_error("cannot enable RX BA session on active "
5395 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5400 *ba_bitmap |= BIT(tid);
5401 wl->ba_rx_session_count++;
5405 case IEEE80211_AMPDU_RX_STOP:
5406 if (!(*ba_bitmap & BIT(tid))) {
5408 * this happens on reconfig - so only output a debug
5409 * message for now, and don't fail the function.
5411 wl1271_debug(DEBUG_MAC80211,
5412 "no active RX BA session on tid: %d",
5418 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5421 *ba_bitmap &= ~BIT(tid);
5422 wl->ba_rx_session_count--;
5427 * The BA initiator session management in FW independently.
5428 * Falling break here on purpose for all TX APDU commands.
5430 case IEEE80211_AMPDU_TX_START:
5431 case IEEE80211_AMPDU_TX_STOP_CONT:
5432 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5433 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5434 case IEEE80211_AMPDU_TX_OPERATIONAL:
5439 wl1271_error("Incorrect ampdu action id=%x\n", action);
5443 pm_runtime_mark_last_busy(wl->dev);
5444 pm_runtime_put_autosuspend(wl->dev);
5447 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask callback: cache the per-band legacy bitrate
 * masks on the vif, and for a not-yet-associated STA also reprogram the
 * FW rate policies immediately (association will apply them otherwise).
 */
5452 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5453 struct ieee80211_vif *vif,
5454 const struct cfg80211_bitrate_mask *mask)
5456 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5457 struct wl1271 *wl = hw->priv;
5460 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5461 mask->control[NL80211_BAND_2GHZ].legacy,
5462 mask->control[NL80211_BAND_5GHZ].legacy);
5464 mutex_lock(&wl->mutex);
5466 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5467 wlvif->bitrate_masks[i] =
5468 wl1271_tx_enabled_rates_get(wl,
5469 mask->control[i].legacy,
5472 if (unlikely(wl->state != WLCORE_STATE_ON))
5475 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5476 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5478 ret = pm_runtime_get_sync(wl->dev);
5480 pm_runtime_put_noidle(wl->dev);
5484 wl1271_set_band_rate(wl, wlvif);
5486 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5487 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5489 pm_runtime_mark_last_busy(wl->dev);
5490 pm_runtime_put_autosuspend(wl->dev);
5493 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch callback (STA). Flushes TX, asks the lower
 * driver to perform the switch, and queues channel_switch_work as a
 * watchdog that reports failure ~5s after the expected switch time.
 * If the chip is already off, the switch is completed as failed.
 */
5498 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5499 struct ieee80211_vif *vif,
5500 struct ieee80211_channel_switch *ch_switch)
5502 struct wl1271 *wl = hw->priv;
5503 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5506 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5508 wl1271_tx_flush(wl);
5510 mutex_lock(&wl->mutex);
5512 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5513 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5514 ieee80211_chswitch_done(vif, false);
5516 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5520 ret = pm_runtime_get_sync(wl->dev);
5522 pm_runtime_put_noidle(wl->dev);
5526 /* TODO: change mac80211 to pass vif as param */
5528 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5529 unsigned long delay_usec;
5531 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5535 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5537 /* indicate failure 5 seconds after channel switch time */
5538 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5540 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5541 usecs_to_jiffies(delay_usec) +
5542 msecs_to_jiffies(5000));
5546 pm_runtime_mark_last_busy(wl->dev);
5547 pm_runtime_put_autosuspend(wl->dev);
5550 mutex_unlock(&wl->mutex);
/*
 * Fetch this vif's current beacon from mac80211 and locate information
 * element 'eid' in its variable part. Returns a pointer into the beacon
 * skb, or NULL if not found.
 * NOTE(review): beacon skb lifetime/free handling is elided in this
 * excerpt — confirm against the full source.
 */
5553 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5554 struct wl12xx_vif *wlvif,
5557 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5558 struct sk_buff *beacon =
5559 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5564 return cfg80211_find_ie(eid,
5565 beacon->data + ieoffset,
5566 beacon->len - ieoffset);
/*
 * Read the CSA countdown value from the Channel Switch Announcement IE of
 * the vif's current beacon into *csa_count. The +2 skips the IE's
 * id/length header bytes.
 */
5569 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5573 const struct ieee80211_channel_sw_ie *ie_csa;
5575 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5579 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5580 *csa_count = ie_csa->count;
/*
 * mac80211 channel_switch_beacon callback (AP CSA). Builds an
 * ieee80211_channel_switch from the target chandef plus the CSA count
 * read out of the current beacon, then hands it to the lower driver and
 * marks the vif as mid-channel-switch.
 */
5585 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5586 struct ieee80211_vif *vif,
5587 struct cfg80211_chan_def *chandef)
5589 struct wl1271 *wl = hw->priv;
5590 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5591 struct ieee80211_channel_switch ch_switch = {
5593 .chandef = *chandef,
5597 wl1271_debug(DEBUG_MAC80211,
5598 "mac80211 channel switch beacon (role %d)",
5601 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5603 wl1271_error("error getting beacon (for CSA counter)");
5607 mutex_lock(&wl->mutex);
5609 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5614 ret = pm_runtime_get_sync(wl->dev);
5616 pm_runtime_put_noidle(wl->dev);
5620 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5624 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5627 pm_runtime_mark_last_busy(wl->dev);
5628 pm_runtime_put_autosuspend(wl->dev);
5630 mutex_unlock(&wl->mutex);
/*
 * mac80211 flush callback: drain all queued TX frames. The queues/drop
 * arguments are ignored — the driver always performs a full flush.
 */
5633 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5634 u32 queues, bool drop)
5636 struct wl1271 *wl = hw->priv;
5638 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel callback. Only one ROC at a time is
 * supported: returns -EBUSY-style failure when any role already ROCs or a
 * roc_vif is set. Otherwise starts the device role on the requested
 * channel and schedules roc_complete_work after 'duration' ms.
 */
5641 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5642 struct ieee80211_vif *vif,
5643 struct ieee80211_channel *chan,
5645 enum ieee80211_roc_type type)
5647 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5648 struct wl1271 *wl = hw->priv;
5649 int channel, active_roc, ret = 0;
5651 channel = ieee80211_frequency_to_channel(chan->center_freq);
5653 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5654 channel, wlvif->role_id);
5656 mutex_lock(&wl->mutex);
5658 if (unlikely(wl->state != WLCORE_STATE_ON))
5661 /* return EBUSY if we can't ROC right now */
5662 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5663 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5664 wl1271_warning("active roc on role %d", active_roc);
5669 ret = pm_runtime_get_sync(wl->dev);
5671 pm_runtime_put_noidle(wl->dev);
5675 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
/* Arm the timer that ends the ROC after the requested duration. */
5680 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5681 msecs_to_jiffies(duration));
5683 pm_runtime_mark_last_busy(wl->dev);
5684 pm_runtime_put_autosuspend(wl->dev);
5686 mutex_unlock(&wl->mutex);
/*
 * Finish a remain-on-channel: stop the device role started for the ROC.
 * No-op if the ROC already completed (roc_vif cleared) or the vif is no
 * longer initialized. Caller holds wl->mutex with FW awake.
 */
5690 static int __wlcore_roc_completed(struct wl1271 *wl)
5692 struct wl12xx_vif *wlvif;
5695 /* already completed */
5696 if (unlikely(!wl->roc_vif))
5699 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5701 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5704 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked/PM-managed wrapper around __wlcore_roc_completed(): takes
 * wl->mutex, wakes the device, completes the ROC, then releases both.
 */
5713 static int wlcore_roc_completed(struct wl1271 *wl)
5717 wl1271_debug(DEBUG_MAC80211, "roc complete");
5719 mutex_lock(&wl->mutex);
5721 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5726 ret = pm_runtime_get_sync(wl->dev);
5728 pm_runtime_put_noidle(wl->dev);
5732 ret = __wlcore_roc_completed(wl);
5734 pm_runtime_mark_last_busy(wl->dev);
5735 pm_runtime_put_autosuspend(wl->dev);
5737 mutex_unlock(&wl->mutex);
/*
 * Delayed work armed by wlcore_op_remain_on_channel(): ends the ROC and
 * notifies mac80211 that the remain-on-channel period expired.
 */
5742 static void wlcore_roc_complete_work(struct work_struct *work)
5744 struct delayed_work *dwork;
5748 dwork = to_delayed_work(work);
5749 wl = container_of(dwork, struct wl1271, roc_complete_work);
5751 ret = wlcore_roc_completed(wl);
5753 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel_remain_on_channel callback: flush TX, cancel the
 * pending roc_complete_work (cancel, not flush — flushing could deadlock
 * since we may be called from the same workqueue) and complete the ROC
 * synchronously.
 */
5756 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5757 struct ieee80211_vif *vif)
5759 struct wl1271 *wl = hw->priv;
5761 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5764 wl1271_tx_flush(wl);
5767 * we can't just flush_work here, because it might deadlock
5768 * (as we might get called from the same workqueue)
5770 cancel_delayed_work_sync(&wl->roc_complete_work);
5771 wlcore_roc_completed(wl);
/*
 * wlcore_op_sta_rc_update - mac80211 rate-control update callback.
 *
 * Only bandwidth changes are handled. This callback runs in atomic
 * context (see comment below), so the new bandwidth and HT caps are
 * cached on the vif and the actual FW update is deferred to
 * wlvif->rc_update_work.
 */
5776 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5777 struct ieee80211_vif *vif,
5778 struct ieee80211_sta *sta,
5781 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5783 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
/* ignore anything except bandwidth changes */
5785 if (!(changed & IEEE80211_RC_BW_CHANGED))
5788 /* this callback is atomic, so schedule a new work */
5789 wlvif->rc_update_bw = sta->bandwidth;
5790 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5791 ieee80211_queue_work(hw, &wlvif->rc_update_work);
/*
 * wlcore_op_sta_statistics - mac80211 per-station statistics callback.
 *
 * Wakes the chip, queries the average RSSI from firmware and reports it
 * as the station signal (dBm) in @sinfo.
 *
 * NOTE(review): error-path braces and the rssi_dbm declaration are
 * elided in this excerpt.
 */
5794 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5795 struct ieee80211_vif *vif,
5796 struct ieee80211_sta *sta,
5797 struct station_info *sinfo)
5799 struct wl1271 *wl = hw->priv;
5800 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5804 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5806 mutex_lock(&wl->mutex);
5808 if (unlikely(wl->state != WLCORE_STATE_ON))
5811 ret = pm_runtime_get_sync(wl->dev);
/* drop the usage count raised by a failed get_sync */
5813 pm_runtime_put_noidle(wl->dev);
5817 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
/* publish the signal only when the ACX query succeeded */
5821 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5822 sinfo->signal = rssi_dbm;
5825 pm_runtime_mark_last_busy(wl->dev);
5826 pm_runtime_put_autosuspend(wl->dev);
5829 mutex_unlock(&wl->mutex);
/*
 * wlcore_op_get_expected_throughput - mac80211 expected-throughput op.
 *
 * Returns the firmware-reported rate for the station's link, converted
 * from Mbps to Kbps as mac80211 expects.
 */
5832 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5833 struct ieee80211_sta *sta)
5835 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5836 struct wl1271 *wl = hw->priv;
5837 u8 hlid = wl_sta->hlid;
5839 /* return in units of Kbps */
5840 return (wl->links[hlid].fw_rate_mbps * 1000);
/*
 * wl1271_tx_frames_pending - mac80211 tx_frames_pending op.
 *
 * Returns true if any frame is still queued in the driver or in flight
 * in the firmware. Takes wl->mutex to read the counters consistently.
 */
5843 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5845 struct wl1271 *wl = hw->priv;
5848 mutex_lock(&wl->mutex);
/* nothing can be pending if the core is down */
5850 if (unlikely(wl->state != WLCORE_STATE_ON))
5853 /* packets are considered pending if in the TX queue or the FW */
5854 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5856 mutex_unlock(&wl->mutex);
/*
 * 2.4 GHz bitrate table: the four 802.11b CCK rates (short-preamble
 * capable except 1 Mbps) followed by the eight OFDM rates.
 * NOTE(review): the .bitrate fields are on lines elided from this
 * excerpt; only hw_value mappings are visible here.
 */
5861 /* can't be const, mac80211 writes to this */
5862 static struct ieee80211_rate wl1271_rates[] = {
5864 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5865 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5867 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5868 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5869 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5871 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5872 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5873 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5875 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5876 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5877 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5879 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5880 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5882 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5883 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5885 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5886 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5888 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5889 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5891 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5892 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5894 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5895 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5897 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5898 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5900 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5901 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 2.4 GHz channels 1-14 (2412-2484 MHz), all at the wlcore max TX power */
5904 /* can't be const, mac80211 writes to this */
5905 static struct ieee80211_channel wl1271_channels[] = {
5906 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5907 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5908 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5909 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5910 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5911 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5912 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5913 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5914 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5915 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5916 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5917 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5918 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5919 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
/* 2.4 GHz band descriptor tying the channel and rate tables together */
5922 /* can't be const, mac80211 writes to this */
5923 static struct ieee80211_supported_band wl1271_band_2ghz = {
5924 .channels = wl1271_channels,
5925 .n_channels = ARRAY_SIZE(wl1271_channels),
5926 .bitrates = wl1271_rates,
5927 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/*
 * 5 GHz bitrate table: OFDM rates only (no CCK on 5 GHz).
 * NOTE(review): the .bitrate fields are on lines elided from this excerpt.
 */
5930 /* 5 GHz data rates for WL1273 */
5931 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5933 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5934 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5936 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5937 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5939 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5940 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5942 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5943 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5945 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5946 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5948 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5949 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5951 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5952 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5954 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5955 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 5 GHz channel list (5040-5825 MHz), all at the wlcore max TX power */
5958 /* 5 GHz band channels for WL1273 */
5959 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5960 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5961 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5962 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5963 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5964 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5965 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5966 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5967 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5968 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5969 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5970 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5971 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5972 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5973 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5974 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5975 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5976 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5977 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5978 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5979 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5980 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5981 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5982 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5983 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5984 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5985 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5986 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5987 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5988 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5989 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5990 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz band descriptor; not const because mac80211 writes channel flags */
5993 static struct ieee80211_supported_band wl1271_band_5ghz = {
5994 .channels = wl1271_channels_5ghz,
5995 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5996 .bitrates = wl1271_rates_5ghz,
5997 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 callback table for all wlcore-based chips. Handlers are a mix
 * of legacy wl1271_/wl12xx_ names and newer wlcore_ names; the testmode
 * command is only present when CONFIG_NL80211_TESTMODE is enabled.
 */
6000 static const struct ieee80211_ops wl1271_ops = {
6001 .start = wl1271_op_start,
6002 .stop = wlcore_op_stop,
6003 .add_interface = wl1271_op_add_interface,
6004 .remove_interface = wl1271_op_remove_interface,
6005 .change_interface = wl12xx_op_change_interface,
6007 .suspend = wl1271_op_suspend,
6008 .resume = wl1271_op_resume,
6010 .config = wl1271_op_config,
6011 .prepare_multicast = wl1271_op_prepare_multicast,
6012 .configure_filter = wl1271_op_configure_filter,
6014 .set_key = wlcore_op_set_key,
6015 .hw_scan = wl1271_op_hw_scan,
6016 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
6017 .sched_scan_start = wl1271_op_sched_scan_start,
6018 .sched_scan_stop = wl1271_op_sched_scan_stop,
6019 .bss_info_changed = wl1271_op_bss_info_changed,
6020 .set_frag_threshold = wl1271_op_set_frag_threshold,
6021 .set_rts_threshold = wl1271_op_set_rts_threshold,
6022 .conf_tx = wl1271_op_conf_tx,
6023 .get_tsf = wl1271_op_get_tsf,
6024 .get_survey = wl1271_op_get_survey,
6025 .sta_state = wl12xx_op_sta_state,
6026 .ampdu_action = wl1271_op_ampdu_action,
6027 .tx_frames_pending = wl1271_tx_frames_pending,
6028 .set_bitrate_mask = wl12xx_set_bitrate_mask,
6029 .set_default_unicast_key = wl1271_op_set_default_key_idx,
6030 .channel_switch = wl12xx_op_channel_switch,
6031 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
6032 .flush = wlcore_op_flush,
6033 .remain_on_channel = wlcore_op_remain_on_channel,
6034 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6035 .add_chanctx = wlcore_op_add_chanctx,
6036 .remove_chanctx = wlcore_op_remove_chanctx,
6037 .change_chanctx = wlcore_op_change_chanctx,
6038 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6039 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6040 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6041 .sta_rc_update = wlcore_op_sta_rc_update,
6042 .sta_statistics = wlcore_op_sta_statistics,
6043 .get_expected_throughput = wlcore_op_get_expected_throughput,
6044 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * wlcore_rate_to_idx - map a HW RX rate code to a mac80211 rate index
 * for the given band, validating the code against the per-chip table.
 * Logs and rejects out-of-range or unsupported codes.
 * NOTE(review): the fallback return values are on lines elided here.
 */
6048 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
/* guard against HW reporting a code past the rate table */
6054 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6055 wl1271_error("Illegal RX rate from HW: %d", rate);
6059 idx = wl->band_rate_to_idx[band][rate];
6060 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6061 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * wl12xx_derive_mac_addresses - build the wiphy address list from a base
 * OUI/NIC pair.
 *
 * Fills wl->num_mac_addr sequential addresses (OUI fixed, NIC visible
 * bytes from @nic), then, if the chip provides fewer addresses than
 * WLCORE_NUM_MAC_ADDRESSES, synthesizes the last one from address 0
 * with the locally-administered bit set.
 * NOTE(review): the per-iteration nic increment is on an elided line.
 */
6068 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6072 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
/* NIC is only 24 bits wide; warn if the derived range would wrap */
6075 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6076 wl1271_warning("NIC part of the MAC address wraps around!");
6078 for (i = 0; i < wl->num_mac_addr; i++) {
6079 wl->addresses[i].addr[0] = (u8)(oui >> 16);
6080 wl->addresses[i].addr[1] = (u8)(oui >> 8);
6081 wl->addresses[i].addr[2] = (u8) oui;
6082 wl->addresses[i].addr[3] = (u8)(nic >> 16);
6083 wl->addresses[i].addr[4] = (u8)(nic >> 8);
6084 wl->addresses[i].addr[5] = (u8) nic;
6088 /* we may be one address short at the most */
6089 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6092 * turn on the LAA bit in the first address and use it as
6095 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6096 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6097 memcpy(&wl->addresses[idx], &wl->addresses[0],
6098 sizeof(wl->addresses[0]));
/* BIT(1) of the first octet marks a locally-administered address */
6100 wl->addresses[idx].addr[0] |= BIT(1);
6103 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6104 wl->hw->wiphy->addresses = wl->addresses;
/*
 * wl12xx_get_hw_info - read chip ID, PG version and (optionally) the
 * fused MAC address via the per-chip ops. Fuse addresses default to 0
 * when the chip does not provide them.
 */
6107 static int wl12xx_get_hw_info(struct wl1271 *wl)
6111 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
/* default: no fused MAC; may be overwritten by ops->get_mac below */
6115 wl->fuse_oui_addr = 0;
6116 wl->fuse_nic_addr = 0;
6118 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6122 if (wl->ops->get_mac)
6123 ret = wl->ops->get_mac(wl);
/*
 * wl1271_register_hw - resolve the device MAC address and register the
 * ieee80211_hw with mac80211 (idempotent: bails if already registered).
 *
 * Address priority: NVS-provided OUI/NIC; if zeroed, the fused address;
 * if the NVS holds the well-known "deadbeef" placeholder, the fuse again
 * or, failing that, the TI OUI with a random NIC.
 *
 * NOTE(review): get_random_int() is deprecated in current kernels in
 * favor of get_random_u32() — candidate for a follow-up when the full
 * function is in view.
 */
6129 static int wl1271_register_hw(struct wl1271 *wl)
6132 u32 oui_addr = 0, nic_addr = 0;
6133 struct platform_device *pdev = wl->pdev;
6134 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6136 if (wl->mac80211_registered)
6139 if (wl->nvs_len >= 12) {
6140 /* NOTE: The wl->nvs->nvs element must be first, in
6141 * order to simplify the casting, we assume it is at
6142 * the beginning of the wl->nvs structure.
6144 u8 *nvs_ptr = (u8 *)wl->nvs;
6147 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6149 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6152 /* if the MAC address is zeroed in the NVS derive from fuse */
6153 if (oui_addr == 0 && nic_addr == 0) {
6154 oui_addr = wl->fuse_oui_addr;
6155 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6156 nic_addr = wl->fuse_nic_addr + 1;
/* default NVS files ship with the deadbe:ef0000 placeholder address */
6159 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6160 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.")
;
6161 if (!strcmp(pdev_data->family->name, "wl18xx")) {
6162 wl1271_warning("This default nvs file can be removed from the file system");
6164 wl1271_warning("Your device performance is not optimized.");
6165 wl1271_warning("Please use the calibrator tool to configure your device.");
6168 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6169 wl1271_warning("Fuse mac address is zero. using random mac");
6170 /* Use TI oui and a random nic */
6171 oui_addr = WLCORE_TI_OUI_ADDRESS;
6172 nic_addr = get_random_int();
6174 oui_addr = wl->fuse_oui_addr;
6175 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6176 nic_addr = wl->fuse_nic_addr + 1;
6180 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6182 ret = ieee80211_register_hw(wl->hw);
6184 wl1271_error("unable to register mac80211 hw: %d", ret);
6188 wl->mac80211_registered = true;
6190 wl1271_debugfs_init(wl);
6192 wl1271_notice("loaded");
/*
 * wl1271_unregister_hw - stop PLT mode (when active, per the elided
 * condition) and unregister from mac80211.
 */
6198 static void wl1271_unregister_hw(struct wl1271 *wl)
6201 wl1271_plt_stop(wl);
6203 ieee80211_unregister_hw(wl->hw);
6204 wl->mac80211_registered = false;
/*
 * wl1271_init_ieee80211 - one-time setup of the ieee80211_hw/wiphy:
 * hw flags, cipher suites, interface modes, scan limits, band tables
 * (copied per-device so channel flags can be modified), queue layout,
 * probe-response offload and vendor commands.
 */
6208 static int wl1271_init_ieee80211(struct wl1271 *wl)
6211 static const u32 cipher_suites[] = {
6212 WLAN_CIPHER_SUITE_WEP40,
6213 WLAN_CIPHER_SUITE_WEP104,
6214 WLAN_CIPHER_SUITE_TKIP,
6215 WLAN_CIPHER_SUITE_CCMP,
/* TI proprietary GEM cipher */
6216 WL1271_CIPHER_SUITE_GEM,
6219 /* The tx descriptor buffer */
6220 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* some chips need extra room for the TKIP header workaround */
6222 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6223 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6226 /* FIXME: find a proper value */
6227 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6229 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6230 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6231 ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6232 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6233 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6234 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6235 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6236 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6237 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6238 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6239 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6240 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6241 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6242 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6243 ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6245 wl->hw->wiphy->cipher_suites = cipher_suites;
6246 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6248 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6249 BIT(NL80211_IFTYPE_AP) |
6250 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6251 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6252 #ifdef CONFIG_MAC80211_MESH
6253 BIT(NL80211_IFTYPE_MESH_POINT) |
6255 BIT(NL80211_IFTYPE_P2P_GO);
6257 wl->hw->wiphy->max_scan_ssids = 1;
6258 wl->hw->wiphy->max_sched_scan_ssids = 16;
6259 wl->hw->wiphy->max_match_sets = 16;
6261 * Maximum length of elements in scanning probe request templates
6262 * should be the maximum length possible for a template, without
6263 * the IEEE80211 header of the template
6265 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6266 sizeof(struct ieee80211_header);
6268 wl->hw->wiphy->max_sched_scan_reqs = 1;
6269 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6270 sizeof(struct ieee80211_header);
6272 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6274 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6275 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6276 WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6277 WIPHY_FLAG_IBSS_RSN;
6279 wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6281 /* make sure all our channels fit in the scanned_ch bitmask */
6282 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6283 ARRAY_SIZE(wl1271_channels_5ghz) >
6284 WL1271_MAX_CHANNELS);
6286 * clear channel flags from the previous usage
6287 * and restore max_power & max_antenna_gain values.
6289 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6290 wl1271_band_2ghz.channels[i].flags = 0;
6291 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6292 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6295 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6296 wl1271_band_5ghz.channels[i].flags = 0;
6297 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6298 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6302 * We keep local copies of the band structs because we need to
6303 * modify them on a per-device basis.
6305 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6306 sizeof(wl1271_band_2ghz));
/* splice the per-chip HT capabilities into the copied band structs */
6307 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6308 &wl->ht_cap[NL80211_BAND_2GHZ],
6309 sizeof(*wl->ht_cap));
6310 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6311 sizeof(wl1271_band_5ghz));
6312 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6313 &wl->ht_cap[NL80211_BAND_5GHZ],
6314 sizeof(*wl->ht_cap));
6316 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6317 &wl->bands[NL80211_BAND_2GHZ];
6318 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6319 &wl->bands[NL80211_BAND_5GHZ];
6322 * allow 4 queues per mac address we support +
6323 * 1 cab queue per mac + one global offchannel Tx queue
6325 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6327 /* the last queue is the offchannel queue */
6328 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6329 wl->hw->max_rates = 1;
6331 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6333 /* the FW answers probe-requests in AP-mode */
6334 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6335 wl->hw->wiphy->probe_resp_offload =
6336 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6337 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6338 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6340 /* allowed interface combinations */
6341 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6342 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6344 /* register vendor commands */
6345 wlcore_set_vendor_commands(wl->hw->wiphy);
6347 SET_IEEE80211_DEV(wl->hw, wl->dev);
6349 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6350 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6352 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * wlcore_alloc_hw - allocate the ieee80211_hw plus all driver-owned
 * state: private area, per-link TX queues, work items, freezable
 * workqueue, aggregation buffer, dummy packet, FW log page, mailbox and
 * scratch buffer. On any failure it unwinds in reverse order (goto-style
 * cleanup labels at the bottom) and returns ERR_PTR(ret).
 *
 * NOTE(review): error labels, some assignments and the `wl = hw->priv`
 * line are elided in this excerpt.
 */
6357 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6360 struct ieee80211_hw *hw;
6365 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6367 wl1271_error("could not alloc ieee80211_hw");
6373 memset(wl, 0, sizeof(*wl));
/* chip-specific private area, sized by the caller (wl12xx/wl18xx) */
6375 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6377 wl1271_error("could not alloc wl priv");
6379 goto err_priv_alloc;
6382 INIT_LIST_HEAD(&wl->wlvif_list);
6387 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6388 * we don't allocate any additional resource here, so that's fine.
6390 for (i = 0; i < NUM_TX_QUEUES; i++)
6391 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6392 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6394 skb_queue_head_init(&wl->deferred_rx_queue);
6395 skb_queue_head_init(&wl->deferred_tx_queue);
6397 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6398 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6399 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6400 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6401 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6402 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* freezable so in-flight work is parked across system suspend */
6404 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6405 if (!wl->freezable_wq) {
6412 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6413 wl->band = NL80211_BAND_2GHZ;
6414 wl->channel_type = NL80211_CHAN_NO_HT;
6416 wl->sg_enabled = true;
6417 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6418 wl->recovery_count = 0;
6421 wl->ap_fw_ps_map = 0;
6423 wl->system_hlid = WL12XX_SYSTEM_HLID;
6424 wl->active_sta_count = 0;
6425 wl->active_link_count = 0;
6428 /* The system link is always allocated */
6429 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6431 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6432 for (i = 0; i < wl->num_tx_desc; i++)
6433 wl->tx_frames[i] = NULL;
6435 spin_lock_init(&wl->wl_lock);
6437 wl->state = WLCORE_STATE_OFF;
6438 wl->fw_type = WL12XX_FW_TYPE_NONE;
6439 mutex_init(&wl->mutex);
6440 mutex_init(&wl->flush_mutex);
6441 init_completion(&wl->nvs_loading_complete);
/* page-granular DMA aggregation buffer */
6443 order = get_order(aggr_buf_size);
6444 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6445 if (!wl->aggr_buf) {
6449 wl->aggr_buf_size = aggr_buf_size;
6451 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6452 if (!wl->dummy_packet) {
6457 /* Allocate one page for the FW log */
6458 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6461 goto err_dummy_packet;
6464 wl->mbox_size = mbox_size;
/* GFP_DMA: the event mailbox is read by DMA-capable bus transfers */
6465 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6471 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6472 if (!wl->buffer_32) {
/* --- error unwind, reverse allocation order --- */
6483 free_page((unsigned long)wl->fwlog);
6486 dev_kfree_skb(wl->dummy_packet);
6489 free_pages((unsigned long)wl->aggr_buf, order);
6492 destroy_workqueue(wl->freezable_wq);
6495 wl1271_debugfs_exit(wl);
6499 ieee80211_free_hw(hw);
6503 return ERR_PTR(ret);
/*
 * wlcore_free_hw - release everything wlcore_alloc_hw() created, in
 * reverse order, then free the ieee80211_hw itself.
 */
6507 int wlcore_free_hw(struct wl1271 *wl)
6509 /* Unblock any fwlog readers */
/* fwlog_size == -1 signals readers that the log is gone */
6510 mutex_lock(&wl->mutex);
6511 wl->fwlog_size = -1;
6512 mutex_unlock(&wl->mutex);
6514 wlcore_sysfs_free(wl);
6516 kfree(wl->buffer_32);
6518 free_page((unsigned long)wl->fwlog);
6519 dev_kfree_skb(wl->dummy_packet);
6520 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6522 wl1271_debugfs_exit(wl);
6526 wl->fw_type = WL12XX_FW_TYPE_NONE;
6530 kfree(wl->raw_fw_status);
6531 kfree(wl->fw_status);
6532 kfree(wl->tx_res_if);
6533 destroy_workqueue(wl->freezable_wq);
6536 ieee80211_free_hw(wl->hw);
6543 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6544 .flags = WIPHY_WOWLAN_ANY,
6545 .n_patterns = WL1271_MAX_RX_FILTERS,
6546 .pattern_min_len = 1,
6547 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/*
 * wlcore_hardirq - minimal hard-IRQ handler used for edge-triggered
 * interrupts: just defer everything to the threaded handler.
 */
6551 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6553 return IRQ_WAKE_THREAD;
/*
 * wlcore_nvs_cb - completion callback of the async NVS firmware request;
 * performs the bulk of device bring-up: copy the NVS, run chip setup,
 * claim the IRQ (and optional dedicated wake IRQ), power-cycle to probe
 * the HW, register with mac80211 and sysfs. On failure it unwinds via
 * the cleanup labels (elided here) and always releases the firmware and
 * completes nvs_loading_complete.
 */
6556 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6558 struct wl1271 *wl = context;
6559 struct platform_device *pdev = wl->pdev;
6560 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6561 struct resource *res;
6564 irq_handler_t hardirq_fn = NULL;
/* keep a private copy; the firmware blob is released at the end */
6567 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6569 wl1271_error("Could not allocate nvs data");
6572 wl->nvs_len = fw->size;
/* missing NVS is only a debug-level event; the driver can continue */
6573 } else if (pdev_data->family->nvs_name) {
6574 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6575 pdev_data->family->nvs_name);
6583 ret = wl->ops->setup(wl);
6587 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6589 /* adjust some runtime configuration parameters */
6590 wlcore_adjust_conf(wl);
6592 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6594 wl1271_error("Could not get IRQ resource");
6598 wl->irq = res->start;
6599 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6600 wl->if_ops = pdev_data->if_ops;
/* edge-triggered IRQs need a hard handler to wake the thread */
6602 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6603 hardirq_fn = wlcore_hardirq;
6605 wl->irq_flags |= IRQF_ONESHOT;
6607 ret = wl12xx_set_power_on(wl);
6611 ret = wl12xx_get_hw_info(wl);
6613 wl1271_error("couldn't get hw info");
6614 wl1271_power_off(wl);
6618 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6619 wl->irq_flags, pdev->name, wl);
6621 wl1271_error("interrupt configuration failed");
6622 wl1271_power_off(wl);
6627 device_init_wakeup(wl->dev, true);
/* advertise WoWLAN only if the IRQ can wake and power stays on */
6629 ret = enable_irq_wake(wl->irq);
6631 wl->irq_wake_enabled = true;
6632 if (pdev_data->pwr_in_suspend)
6633 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
/* optional second IRQ resource: dedicated out-of-band wake IRQ */
6636 res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6638 wl->wakeirq = res->start;
6639 wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6640 ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6642 wl->wakeirq = -ENODEV;
6644 wl->wakeirq = -ENODEV;
/* HW probing done; power down until the interface is brought up */
6647 disable_irq(wl->irq);
6648 wl1271_power_off(wl);
6650 ret = wl->ops->identify_chip(wl);
6654 ret = wl1271_init_ieee80211(wl);
6658 ret = wl1271_register_hw(wl);
6662 ret = wlcore_sysfs_init(wl);
6666 wl->initialized = true;
/* --- error unwind --- */
6670 wl1271_unregister_hw(wl);
6673 if (wl->wakeirq >= 0)
6674 dev_pm_clear_wake_irq(wl->dev);
6675 device_init_wakeup(wl->dev, false);
6676 free_irq(wl->irq, wl);
6682 release_firmware(fw);
/* wlcore_remove() waits on this before tearing down */
6683 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_runtime_suspend - runtime-PM suspend: put the chip into ELP
 * (extremely low power) sleep when allowed.
 *
 * Skipped in PLT mode and when ELP is not the configured sleep mode;
 * also refuses (per the visible loop) while any in-use vif is not yet
 * in power-save. Writes the sleep command to the ELP control register
 * and sets WL1271_FLAG_IN_ELP on success; a failed write queues FW
 * recovery.
 */
6686 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6688 struct wl1271 *wl = dev_get_drvdata(dev);
6689 struct wl12xx_vif *wlvif;
6692 /* We do not enter elp sleep in PLT mode */
6696 /* Nothing to do if no ELP mode requested */
6697 if (wl->sleep_auth != WL1271_PSM_ELP)
6700 wl12xx_for_each_wlvif(wl, wlvif) {
6701 if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6702 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6706 wl1271_debug(DEBUG_PSM, "chip to elp");
6707 error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6709 wl12xx_queue_recovery_work(wl);
6714 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
/*
 * wlcore_runtime_resume - runtime-PM resume: wake the chip from ELP.
 *
 * Publishes an on-stack completion via wl->elp_compl (under wl_lock),
 * writes the wake-up command, then waits for the firmware wake-up
 * interrupt to complete it — unless the IRQ thread is already running.
 * On timeout or write failure it flags an intended FW recovery.
 */
6719 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6721 struct wl1271 *wl = dev_get_drvdata(dev);
6722 DECLARE_COMPLETION_ONSTACK(compl);
6723 unsigned long flags;
6725 unsigned long start_time = jiffies;
6726 bool recovery = false;
6728 /* Nothing to do if no ELP mode requested */
6729 if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6732 wl1271_debug(DEBUG_PSM, "waking up chip from elp");
/* expose the completion to the IRQ path before triggering wake-up */
6734 spin_lock_irqsave(&wl->wl_lock, flags);
6735 wl->elp_compl = &compl;
6736 spin_unlock_irqrestore(&wl->wl_lock, flags);
6738 ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/* if the IRQ thread is already running, the chip is effectively awake */
6741 } else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
6742 ret = wait_for_completion_timeout(&compl,
6743 msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6745 wl1271_warning("ELP wakeup timeout!");
/* always unpublish the on-stack completion before returning */
6750 spin_lock_irqsave(&wl->wl_lock, flags);
6751 wl->elp_compl = NULL;
6752 spin_unlock_irqrestore(&wl->wl_lock, flags);
6753 clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6756 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6757 wl12xx_queue_recovery_work(wl);
6759 wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6760 jiffies_to_msecs(jiffies - start_time));
6766 static const struct dev_pm_ops wlcore_pm_ops = {
6767 SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6768 wlcore_runtime_resume,
/*
 * wlcore_probe - common platform-probe entry called by the wl12xx/wl18xx
 * bus glue. Validates the per-chip ops, then either requests the NVS
 * file asynchronously (wlcore_nvs_cb finishes the probe) or calls the
 * callback directly with no firmware. Finally installs the PM ops and
 * enables runtime PM with a 50 ms autosuspend delay.
 */
6772 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6774 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6775 const char *nvs_name;
6778 if (!wl->ops || !wl->ptable || !pdev_data)
6781 wl->dev = &pdev->dev;
6783 platform_set_drvdata(pdev, wl);
6785 if (pdev_data->family && pdev_data->family->nvs_name) {
6786 nvs_name = pdev_data->family->nvs_name;
/* async: wlcore_nvs_cb() runs when the NVS file arrives (or fails) */
6787 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6788 nvs_name, &pdev->dev, GFP_KERNEL,
6791 wl1271_error("request_firmware_nowait failed for %s: %d",
/* let waiters in wlcore_remove() proceed despite the failure */
6793 complete_all(&wl->nvs_loading_complete);
6796 wlcore_nvs_cb(NULL, wl);
6799 wl->dev->driver->pm = &wlcore_pm_ops;
6800 pm_runtime_set_autosuspend_delay(wl->dev, 50);
6801 pm_runtime_use_autosuspend(wl->dev);
6802 pm_runtime_enable(wl->dev);
/*
 * wlcore_remove - platform-remove counterpart of wlcore_probe().
 *
 * Wakes the device, waits for any in-flight NVS callback to finish,
 * then tears down wake IRQs, the mac80211 registration, runtime PM and
 * the interrupt, in that order. Does nothing past the NVS wait if the
 * async probe never reached the initialized state.
 */
6808 int wlcore_remove(struct platform_device *pdev)
6810 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6811 struct wl1271 *wl = platform_get_drvdata(pdev);
6814 error = pm_runtime_get_sync(wl->dev);
6816 dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6818 wl->dev->driver->pm = NULL;
/* make sure wlcore_nvs_cb() is not still running */
6820 if (pdev_data->family && pdev_data->family->nvs_name)
6821 wait_for_completion(&wl->nvs_loading_complete);
6822 if (!wl->initialized)
6825 if (wl->wakeirq >= 0) {
6826 dev_pm_clear_wake_irq(wl->dev);
6827 wl->wakeirq = -ENODEV;
6830 device_init_wakeup(wl->dev, false);
6832 if (wl->irq_wake_enabled)
6833 disable_irq_wake(wl->irq);
6835 wl1271_unregister_hw(wl);
6837 pm_runtime_put_sync(wl->dev);
6838 pm_runtime_dont_use_autosuspend(wl->dev);
6839 pm_runtime_disable(wl->dev);
6841 free_irq(wl->irq, wl);
/* module parameters and metadata; the int params back the static
 * fwlog_param/fwlog_mem_blocks/bug_on_recovery/no_recovery globals
 * declared near the top of the file (-1 = "not set") */
6848 u32 wl12xx_debug_level = DEBUG_NONE;
6849 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6850 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6851 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6853 module_param_named(fwlog, fwlog_param, charp, 0);
6854 MODULE_PARM_DESC(fwlog,
6855 "FW logger options: continuous, dbgpins or disable");
6857 module_param(fwlog_mem_blocks, int, 0600);
6858 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6860 module_param(bug_on_recovery, int, 0600);
6861 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6863 module_param(no_recovery, int, 0600);
6864 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6866 MODULE_LICENSE("GPL");
6867 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6868 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");