1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
22 * The full GNU General Public License is included in this distribution
23 * in the file called COPYING.
25 * Contact Information:
26 * Intel Linux Wireless <linuxwifi@intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
32 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
33 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 Intel Corporation
35 * All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
41 * * Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * * Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in
45 * the documentation and/or other materials provided with the
47 * * Neither the name Intel Corporation nor the names of its
48 * contributors may be used to endorse or promote products derived
49 * from this software without specific prior written permission.
51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64 #include <net/mac80211.h>
66 #include "iwl-debug.h"
71 #include "fw/api/rs.h"
74 * Will return 0 even if the cmd failed because RFKILL is asserted, unless
75 * CMD_WANT_SKB is set in cmd->flags.
77 int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
81 #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
82 if (WARN_ON(mvm->d3_test_active))
87 * Synchronous commands from this op-mode must hold
88 * the mutex; this ensures we don't try to send two
89 * (or more) synchronous commands at a time.
91 if (!(cmd->flags & CMD_ASYNC)) {
92 lockdep_assert_held(&mvm->mutex);
93 if (!(cmd->flags & CMD_SEND_IN_IDLE))
94 iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
97 ret = iwl_trans_send_cmd(mvm->trans, cmd);
99 if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
100 iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);
103 * If the caller wants the SKB, then don't hide any problems; the
104 * caller might access the response buffer which will be NULL if
105 * the command failed.
107 if (cmd->flags & CMD_WANT_SKB)
110 /* Silently ignore failures if RFKILL is asserted */
111 if (!ret || ret == -ERFKILL)
116 int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
117 u32 flags, u16 len, const void *data)
119 struct iwl_host_cmd cmd = {
126 return iwl_mvm_send_cmd(mvm, &cmd);
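/*
 * Illustrative use of the PDU helper above (a sketch, not code from this
 * file): most callers build the payload on the stack and send it in one
 * call, e.g.
 *
 *	struct iwl_foo_cmd foo_cmd = { ... };	// hypothetical command struct
 *
 *	ret = iwl_mvm_send_cmd_pdu(mvm, FOO_CMD, 0, sizeof(foo_cmd), &foo_cmd);
 *
 * FOO_CMD is a placeholder command id.  A synchronous send (flags == 0)
 * must be done under mvm->mutex, while CMD_ASYNC returns without waiting
 * for the firmware response.
 */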
130 * We assume that the caller set the status to the success value
132 int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
135 struct iwl_rx_packet *pkt;
136 struct iwl_cmd_response *resp;
139 lockdep_assert_held(&mvm->mutex);
141 #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
142 if (WARN_ON(mvm->d3_test_active))
147 * Only synchronous commands can wait for status; we use CMD_WANT_SKB
148 * internally here, so the caller must not set it.
150 if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
151 "cmd flags %x", cmd->flags))
154 cmd->flags |= CMD_WANT_SKB;
156 ret = iwl_trans_send_cmd(mvm->trans, cmd);
157 if (ret == -ERFKILL) {
159 * The command failed because of RFKILL, don't update
160 * the status, leave it as success and return 0.
169 resp_len = iwl_rx_packet_payload_len(pkt);
170 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
175 resp = (void *)pkt->data;
176 *status = le32_to_cpu(resp->status);
183 * We assume that the caller set the status to the success value
185 int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
186 const void *data, u32 *status)
188 struct iwl_host_cmd cmd = {
194 return iwl_mvm_send_cmd_status(mvm, &cmd, status);
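/*
 * Illustrative use of the status helper above (sketch only): the caller
 * pre-loads *status with the value that means "success" for that command
 * and checks it after the call, roughly:
 *
 *	u32 status = FOO_SUCCESS;	// hypothetical success value
 *
 *	ret = iwl_mvm_send_cmd_pdu_status(mvm, FOO_CMD, sizeof(foo_cmd),
 *					  &foo_cmd, &status);
 *	if (ret || status != FOO_SUCCESS)
 *		// handle failure; on -ERFKILL status is left untouched
 *
 * FOO_CMD/FOO_SUCCESS are placeholders, not identifiers from this driver.
 */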
197 #define IWL_DECLARE_RATE_INFO(r) \
198 [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
201 * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
203 static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
204 IWL_DECLARE_RATE_INFO(1),
205 IWL_DECLARE_RATE_INFO(2),
206 IWL_DECLARE_RATE_INFO(5),
207 IWL_DECLARE_RATE_INFO(11),
208 IWL_DECLARE_RATE_INFO(6),
209 IWL_DECLARE_RATE_INFO(9),
210 IWL_DECLARE_RATE_INFO(12),
211 IWL_DECLARE_RATE_INFO(18),
212 IWL_DECLARE_RATE_INFO(24),
213 IWL_DECLARE_RATE_INFO(36),
214 IWL_DECLARE_RATE_INFO(48),
215 IWL_DECLARE_RATE_INFO(54),
218 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
219 enum nl80211_band band)
221 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
225 /* Legacy rate format, search for match in table */
226 if (band == NL80211_BAND_5GHZ)
227 band_offset = IWL_FIRST_OFDM_RATE;
228 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
229 if (fw_rate_idx_to_plcp[idx] == rate)
230 return idx - band_offset;
235 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
237 /* Get PLCP rate for tx_cmd->rate_n_flags */
238 return fw_rate_idx_to_plcp[rate_idx];
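/*
 * Worked example for the two translations above (illustrative only): on
 * 5 GHz there are no CCK rates, so band_offset == IWL_FIRST_OFDM_RATE and
 * the firmware index of 6 Mbps (the first OFDM rate in the table) maps to
 * mac80211 rate index 0.  Going the other way,
 * iwl_mvm_mac80211_idx_to_hwrate() just indexes fw_rate_idx_to_plcp[], so
 * it expects a firmware rate index - a 5 GHz mac80211 index needs the band
 * offset added back before the call.
 */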
241 void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
243 struct iwl_rx_packet *pkt = rxb_addr(rxb);
244 struct iwl_error_resp *err_resp = (void *)pkt->data;
246 IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
247 le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
248 IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
249 le16_to_cpu(err_resp->bad_cmd_seq_num),
250 le32_to_cpu(err_resp->error_service));
251 IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
252 le64_to_cpu(err_resp->timestamp));
256 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
257 * The parameter should also be a combination of ANT_[ABC].
259 u8 first_antenna(u8 mask)
261 BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
262 if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
264 return BIT(ffs(mask) - 1);
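/*
 * Example (illustrative): with ANT_A == BIT(0), ANT_B == BIT(1) and
 * ANT_C == BIT(2), first_antenna(ANT_B | ANT_C) is BIT(ffs(0x6) - 1) ==
 * BIT(1) == ANT_B, i.e. the lowest antenna present in the mask.
 */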
268 * Toggles between TX antennas to send the probe request on.
269 * Receives the bitmask of valid TX antennas and the *index* used
270 * for the last TX, and returns the next valid *index* to use.
271 * To set it in the tx_cmd, the caller must use BIT(idx).
273 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
278 for (i = 0; i < MAX_ANT_NUM; i++) {
279 ind = (ind + 1) % MAX_ANT_NUM;
280 if (valid & BIT(ind))
284 WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
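/*
 * Example of the toggling above (illustrative): with valid == ANT_A | ANT_C
 * (0b101) and last_idx == 0, the loop tries index 1 (not valid) and then
 * index 2 (valid), so 2 is returned and the caller puts BIT(2) == ANT_C
 * into the tx_cmd.  Only if no set bit is found does the WARN_ONCE above
 * fire.
 */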
288 #define FW_SYSASSERT_CPU_MASK 0xf0000000
289 static const struct {
292 } advanced_lookup[] = {
293 { "NMI_INTERRUPT_WDG", 0x34 },
294 { "SYSASSERT", 0x35 },
295 { "UCODE_VERSION_MISMATCH", 0x37 },
296 { "BAD_COMMAND", 0x38 },
297 { "BAD_COMMAND", 0x39 },
298 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
299 { "FATAL_ERROR", 0x3D },
300 { "NMI_TRM_HW_ERR", 0x46 },
301 { "NMI_INTERRUPT_TRM", 0x4C },
302 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
303 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
304 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
305 { "NMI_INTERRUPT_HOST", 0x66 },
306 { "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
307 { "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
308 { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
309 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
310 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
311 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
312 { "ADVANCED_SYSASSERT", 0 },
315 static const char *desc_lookup(u32 num)
319 for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
320 if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
321 return advanced_lookup[i].name;
323 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
324 return advanced_lookup[i].name;
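/*
 * Example (illustrative): desc_lookup() masks off FW_SYSASSERT_CPU_MASK
 * before comparing, so an assert id of 0x20000038 matches the 0x38 entry
 * and returns "BAD_COMMAND"; an id with no match falls through to the
 * final "ADVANCED_SYSASSERT" entry.
 */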
328 * Note: This structure is read from the device with IO accesses,
329 * and the reading already does the endian conversion. As it is
330 * read with u32-sized accesses, any members with a different size
331 * need to be ordered correctly though!
333 struct iwl_error_event_table_v1 {
334 u32 valid; /* (nonzero) valid, (0) log is empty */
335 u32 error_id; /* type of error */
336 u32 pc; /* program counter */
337 u32 blink1; /* branch link */
338 u32 blink2; /* branch link */
339 u32 ilink1; /* interrupt link */
340 u32 ilink2; /* interrupt link */
341 u32 data1; /* error-specific data */
342 u32 data2; /* error-specific data */
343 u32 data3; /* error-specific data */
344 u32 bcon_time; /* beacon timer */
345 u32 tsf_low; /* network timestamp function timer */
346 u32 tsf_hi; /* network timestamp function timer */
347 u32 gp1; /* GP1 timer register */
348 u32 gp2; /* GP2 timer register */
349 u32 gp3; /* GP3 timer register */
350 u32 ucode_ver; /* uCode version */
351 u32 hw_ver; /* HW Silicon version */
352 u32 brd_ver; /* HW board version */
353 u32 log_pc; /* log program counter */
354 u32 frame_ptr; /* frame pointer */
355 u32 stack_ptr; /* stack pointer */
356 u32 hcmd; /* last host command header */
357 u32 isr0; /* isr status register LMPM_NIC_ISR0:
359 u32 isr1; /* isr status register LMPM_NIC_ISR1:
361 u32 isr2; /* isr status register LMPM_NIC_ISR2:
363 u32 isr3; /* isr status register LMPM_NIC_ISR3:
365 u32 isr4; /* isr status register LMPM_NIC_ISR4:
367 u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
368 u32 wait_event; /* wait event() caller address */
369 u32 l2p_control; /* L2pControlField */
370 u32 l2p_duration; /* L2pDurationField */
371 u32 l2p_mhvalid; /* L2pMhValidBits */
372 u32 l2p_addr_match; /* L2pAddrMatchStat */
373 u32 lmpm_pmg_sel; /* indicate which clocks are turned on
375 u32 u_timestamp; /* indicate when the date and time of the
377 u32 flow_handler; /* FH read/write pointers, RX credit */
378 } __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
380 struct iwl_error_event_table {
381 u32 valid; /* (nonzero) valid, (0) log is empty */
382 u32 error_id; /* type of error */
383 u32 trm_hw_status0; /* TRM HW status */
384 u32 trm_hw_status1; /* TRM HW status */
385 u32 blink2; /* branch link */
386 u32 ilink1; /* interrupt link */
387 u32 ilink2; /* interrupt link */
388 u32 data1; /* error-specific data */
389 u32 data2; /* error-specific data */
390 u32 data3; /* error-specific data */
391 u32 bcon_time; /* beacon timer */
392 u32 tsf_low; /* network timestamp function timer */
393 u32 tsf_hi; /* network timestamp function timer */
394 u32 gp1; /* GP1 timer register */
395 u32 gp2; /* GP2 timer register */
396 u32 fw_rev_type; /* firmware revision type */
397 u32 major; /* uCode version major */
398 u32 minor; /* uCode version minor */
399 u32 hw_ver; /* HW Silicon version */
400 u32 brd_ver; /* HW board version */
401 u32 log_pc; /* log program counter */
402 u32 frame_ptr; /* frame pointer */
403 u32 stack_ptr; /* stack pointer */
404 u32 hcmd; /* last host command header */
405 u32 isr0; /* isr status register LMPM_NIC_ISR0:
407 u32 isr1; /* isr status register LMPM_NIC_ISR1:
409 u32 isr2; /* isr status register LMPM_NIC_ISR2:
411 u32 isr3; /* isr status register LMPM_NIC_ISR3:
413 u32 isr4; /* isr status register LMPM_NIC_ISR4:
415 u32 last_cmd_id; /* last HCMD id handled by the firmware */
416 u32 wait_event; /* wait event() caller address */
417 u32 l2p_control; /* L2pControlField */
418 u32 l2p_duration; /* L2pDurationField */
419 u32 l2p_mhvalid; /* L2pMhValidBits */
420 u32 l2p_addr_match; /* L2pAddrMatchStat */
421 u32 lmpm_pmg_sel; /* indicate which clocks are turned on
423 u32 u_timestamp; /* indicate when the date and time of the
425 u32 flow_handler; /* FH read/write pointers, RX credit */
426 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
429 * UMAC error struct - relevant starting from family 8000 chip.
430 * Note: This structure is read from the device with IO accesses,
431 * and the reading already does the endian conversion. As it is
432 * read with u32-sized accesses, any members with a different size
433 * need to be ordered correctly though!
435 struct iwl_umac_error_event_table {
436 u32 valid; /* (nonzero) valid, (0) log is empty */
437 u32 error_id; /* type of error */
438 u32 blink1; /* branch link */
439 u32 blink2; /* branch link */
440 u32 ilink1; /* interrupt link */
441 u32 ilink2; /* interrupt link */
442 u32 data1; /* error-specific data */
443 u32 data2; /* error-specific data */
444 u32 data3; /* error-specific data */
447 u32 frame_pointer; /* core register 27*/
448 u32 stack_pointer; /* core register 28 */
449 u32 cmd_header; /* latest host cmd sent to UMAC */
450 u32 nic_isr_pref; /* ISR status register */
453 #define ERROR_START_OFFSET (1 * sizeof(u32))
454 #define ERROR_ELEM_SIZE (7 * sizeof(u32))
456 static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
458 struct iwl_trans *trans = mvm->trans;
459 struct iwl_umac_error_event_table table;
460 u32 base = mvm->trans->umac_error_event_table;
462 if (!mvm->support_umac_log &&
463 !(mvm->trans->error_event_table_tlv_status &
464 IWL_ERROR_EVENT_TABLE_UMAC))
467 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
470 mvm->fwrt.dump.umac_err_id = table.error_id;
472 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
473 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
474 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
475 mvm->status, table.valid);
478 IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
479 desc_lookup(table.error_id));
480 IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
481 IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
482 IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
483 IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
484 IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
485 IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
486 IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
487 IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
488 IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
489 IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
490 IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
491 IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
492 IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
495 static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
497 struct iwl_trans *trans = mvm->trans;
498 struct iwl_error_event_table table;
499 u32 val, base = mvm->trans->lmac_error_event_table[lmac_num];
501 if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
503 base = mvm->fw->init_errlog_ptr;
506 base = mvm->fw->inst_errlog_ptr;
509 if (base < 0x400000) {
511 "Not valid error log pointer 0x%08X for %s uCode\n",
513 (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
518 /* check if there is a HW error */
519 val = iwl_trans_read_mem32(trans, base);
520 if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
523 IWL_ERR(trans, "HW error, resetting before reading\n");
525 /* reset the device */
526 iwl_trans_sw_reset(trans);
528 err = iwl_finish_nic_init(trans);
533 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
536 mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id;
538 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
539 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
540 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
541 mvm->status, table.valid);
544 /* Do not change this output - scripts rely on it */
546 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
548 IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
549 desc_lookup(table.error_id));
550 IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
551 IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
552 IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
553 IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
554 IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
555 IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
556 IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
557 IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
558 IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
559 IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
560 IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
561 IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
562 IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
563 IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
564 IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
565 IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
566 IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
567 IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
568 IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
569 IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
570 IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
571 IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
572 IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
573 IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
574 IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
575 IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
576 IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
577 IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
578 IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
579 IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
580 IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
581 IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
582 IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
585 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
587 if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
589 "DEVICE_ENABLED bit is not set. Aborting dump.\n");
593 iwl_mvm_dump_lmac_error_log(mvm, 0);
595 if (mvm->trans->lmac_error_event_table[1])
596 iwl_mvm_dump_lmac_error_log(mvm, 1);
598 iwl_mvm_dump_umac_error_log(mvm);
601 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
602 int tid, int frame_limit, u16 ssn)
604 struct iwl_scd_txq_cfg_cmd cmd = {
606 .action = SCD_CFG_ENABLE_QUEUE,
607 .window = frame_limit,
609 .ssn = cpu_to_le16(ssn),
611 .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
612 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
617 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
620 if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
621 "Trying to reconfig unallocated queue %d\n", queue))
624 IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
626 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
627 WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
634 * iwl_mvm_send_lq_cmd() - Send link quality command
635 * @sync: This command can be sent synchronously.
637 * The link quality command is sent as the last step of station creation.
638 * This is the special case in which init is set, and in that case we use a
639 * callback to clear the state indicating that station creation is in progress.
642 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
644 struct iwl_host_cmd cmd = {
646 .len = { sizeof(struct iwl_lq_cmd), },
647 .flags = sync ? 0 : CMD_ASYNC,
651 if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
652 iwl_mvm_has_tlc_offload(mvm)))
655 return iwl_mvm_send_cmd(mvm, &cmd);
659 * iwl_mvm_update_smps - Get a request to change the SMPS mode
660 * @req_type: The part of the driver that requested the change.
661 * @smps_request: The requested SMPS mode.
663 * Get a request to change the SMPS mode,
664 * and change it according to all other requests in the driver.
666 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
667 enum iwl_mvm_smps_type_request req_type,
668 enum ieee80211_smps_mode smps_request)
670 struct iwl_mvm_vif *mvmvif;
671 enum ieee80211_smps_mode smps_mode;
674 lockdep_assert_held(&mvm->mutex);
676 /* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
677 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
680 if (vif->type == NL80211_IFTYPE_AP)
681 smps_mode = IEEE80211_SMPS_OFF;
683 smps_mode = IEEE80211_SMPS_AUTOMATIC;
685 mvmvif = iwl_mvm_vif_from_mac80211(vif);
686 mvmvif->smps_requests[req_type] = smps_request;
687 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
688 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
689 smps_mode = IEEE80211_SMPS_STATIC;
692 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
693 smps_mode = IEEE80211_SMPS_DYNAMIC;
696 ieee80211_request_smps(vif, smps_mode);
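/*
 * Illustration of the precedence applied above: every requester stores its
 * wish in mvmvif->smps_requests[] and the strictest one wins - any STATIC
 * request forces STATIC, otherwise any DYNAMIC request forces DYNAMIC,
 * otherwise the default (OFF for AP interfaces, AUTOMATIC for everything
 * else) is used.  E.g. if one requester (say BT coex) asks for DYNAMIC
 * while another asks for STATIC, the mode sent to mac80211 is STATIC.
 */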
699 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
701 struct iwl_statistics_cmd scmd = {
702 .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
704 struct iwl_host_cmd cmd = {
705 .id = STATISTICS_CMD,
706 .len[0] = sizeof(scmd),
708 .flags = CMD_WANT_SKB,
712 ret = iwl_mvm_send_cmd(mvm, &cmd);
716 iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
720 iwl_mvm_accu_radio_stats(mvm);
725 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
727 mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
728 mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
729 mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
730 mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
733 static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
734 struct ieee80211_vif *vif)
736 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
737 bool *result = _data;
740 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
741 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
742 mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
747 bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
751 lockdep_assert_held(&mvm->mutex);
753 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
756 if (mvm->cfg->rx_with_siso_diversity)
759 ieee80211_iterate_active_interfaces_atomic(
760 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
761 iwl_mvm_diversity_iter, &result);
766 void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
767 bool low_latency, u16 mac_id)
769 struct iwl_mac_low_latency_cmd cmd = {
770 .mac_id = cpu_to_le32(mac_id)
773 if (!fw_has_capa(&mvm->fw->ucode_capa,
774 IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
778 /* currently we don't care about the direction */
779 cmd.low_latency_rx = 1;
780 cmd.low_latency_tx = 1;
783 if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
785 0, sizeof(cmd), &cmd))
786 IWL_ERR(mvm, "Failed to send low latency command\n");
789 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
791 enum iwl_mvm_low_latency_cause cause)
793 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
797 lockdep_assert_held(&mvm->mutex);
799 prev = iwl_mvm_vif_low_latency(mvmvif);
800 iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);
802 low_latency = iwl_mvm_vif_low_latency(mvmvif);
804 if (low_latency == prev)
807 iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);
809 res = iwl_mvm_update_quotas(mvm, false, NULL);
813 iwl_mvm_bt_coex_vif_change(mvm);
815 return iwl_mvm_power_update_mac(mvm);
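/*
 * Note on the cause bookkeeping above: iwl_mvm_vif_set_low_latency() only
 * sets or clears the one cause bit passed in (e.g. LOW_LATENCY_TRAFFIC),
 * which is why the aggregate value is re-read afterwards - clearing one
 * cause leaves the vif in low-latency mode as long as any other cause is
 * still set, and the firmware/quota updates are skipped whenever the
 * aggregate state did not actually change.
 */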
818 struct iwl_mvm_low_latency_iter {
820 bool result_per_band[NUM_NL80211_BANDS];
823 static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
825 struct iwl_mvm_low_latency_iter *result = _data;
826 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
827 enum nl80211_band band;
829 if (iwl_mvm_vif_low_latency(mvmvif)) {
830 result->result = true;
832 if (!mvmvif->phy_ctxt)
835 band = mvmvif->phy_ctxt->channel->band;
836 result->result_per_band[band] = true;
840 bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
842 struct iwl_mvm_low_latency_iter data = {};
844 ieee80211_iterate_active_interfaces_atomic(
845 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
846 iwl_mvm_ll_iter, &data);
851 bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
853 struct iwl_mvm_low_latency_iter data = {};
855 ieee80211_iterate_active_interfaces_atomic(
856 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
857 iwl_mvm_ll_iter, &data);
859 return data.result_per_band[band];
862 struct iwl_bss_iter_data {
863 struct ieee80211_vif *vif;
867 static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
868 struct ieee80211_vif *vif)
870 struct iwl_bss_iter_data *data = _data;
872 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
883 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
885 struct iwl_bss_iter_data bss_iter_data = {};
887 ieee80211_iterate_active_interfaces_atomic(
888 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
889 iwl_mvm_bss_iface_iterator, &bss_iter_data);
891 if (bss_iter_data.error) {
892 IWL_ERR(mvm, "More than one managed interface active!\n");
893 return ERR_PTR(-EINVAL);
896 return bss_iter_data.vif;
899 struct iwl_sta_iter_data {
903 static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
904 struct ieee80211_vif *vif)
906 struct iwl_sta_iter_data *data = _data;
908 if (vif->type != NL80211_IFTYPE_STATION)
911 if (vif->bss_conf.assoc)
915 bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
917 struct iwl_sta_iter_data data = {
921 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
922 IEEE80211_IFACE_ITER_NORMAL,
923 iwl_mvm_sta_iface_iterator,
928 unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
929 struct ieee80211_vif *vif,
930 bool tdls, bool cmd_q)
932 struct iwl_fw_dbg_trigger_tlv *trigger;
933 struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
934 unsigned int default_timeout =
935 cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
937 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
939 * We can't know when the station is asleep or awake, so we
940 * must disable the queue hang detection.
942 if (fw_has_capa(&mvm->fw->ucode_capa,
943 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
944 vif && vif->type == NL80211_IFTYPE_AP)
945 return IWL_WATCHDOG_DISABLED;
946 return iwlmvm_mod_params.tfd_q_hang_detect ?
947 default_timeout : IWL_WATCHDOG_DISABLED;
950 trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
951 txq_timer = (void *)trigger->data;
954 return le32_to_cpu(txq_timer->tdls);
957 return le32_to_cpu(txq_timer->command_queue);
960 return default_timeout;
962 switch (ieee80211_vif_type_p2p(vif)) {
963 case NL80211_IFTYPE_ADHOC:
964 return le32_to_cpu(txq_timer->ibss);
965 case NL80211_IFTYPE_STATION:
966 return le32_to_cpu(txq_timer->bss);
967 case NL80211_IFTYPE_AP:
968 return le32_to_cpu(txq_timer->softap);
969 case NL80211_IFTYPE_P2P_CLIENT:
970 return le32_to_cpu(txq_timer->p2p_client);
971 case NL80211_IFTYPE_P2P_GO:
972 return le32_to_cpu(txq_timer->p2p_go);
973 case NL80211_IFTYPE_P2P_DEVICE:
974 return le32_to_cpu(txq_timer->p2p_device);
975 case NL80211_IFTYPE_MONITOR:
976 return default_timeout;
979 return mvm->cfg->base_params->wd_timeout;
983 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
986 struct iwl_fw_dbg_trigger_tlv *trig;
987 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
989 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
990 FW_DBG_TRIGGER_MLME);
994 trig_mlme = (void *)trig->data;
996 if (trig_mlme->stop_connection_loss &&
997 --trig_mlme->stop_connection_loss)
1000 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);
1003 ieee80211_connection_loss(vif);
1006 void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
1007 struct ieee80211_vif *vif,
1008 const struct ieee80211_sta *sta,
1011 struct iwl_fw_dbg_trigger_tlv *trig;
1012 struct iwl_fw_dbg_trigger_ba *ba_trig;
1014 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
1019 ba_trig = (void *)trig->data;
1021 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
1024 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1025 "Frame from %pM timed out, tid %d",
1029 u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
1034 return (100 * airtime / elapsed) / USEC_PER_MSEC;
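/*
 * Worked example (illustrative): airtime is accumulated in usec while
 * elapsed is in msec, hence the extra division by USEC_PER_MSEC.  For
 * 250000 usec of airtime over a 1000 msec window this gives
 * (100 * 250000 / 1000) / 1000 == 25, i.e. a 25% load, which
 * iwl_mvm_tcm_load() below then buckets into LOW/MEDIUM/HIGH.
 */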
1037 static enum iwl_mvm_traffic_load
1038 iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
1040 u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
1042 if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
1043 return IWL_MVM_TRAFFIC_HIGH;
1044 if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
1045 return IWL_MVM_TRAFFIC_MEDIUM;
1047 return IWL_MVM_TRAFFIC_LOW;
1050 struct iwl_mvm_tcm_iter_data {
1051 struct iwl_mvm *mvm;
1055 static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
1057 struct iwl_mvm_tcm_iter_data *data = _data;
1058 struct iwl_mvm *mvm = data->mvm;
1059 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1060 bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
1062 if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
1065 low_latency = mvm->tcm.result.low_latency[mvmvif->id];
1067 if (!mvm->tcm.result.change[mvmvif->id] &&
1068 prev == low_latency) {
1069 iwl_mvm_update_quotas(mvm, false, NULL);
1073 if (prev != low_latency) {
1074 /* this sends traffic load and updates quota as well */
1075 iwl_mvm_update_low_latency(mvm, vif, low_latency,
1076 LOW_LATENCY_TRAFFIC);
1078 iwl_mvm_update_quotas(mvm, false, NULL);
1081 data->any_sent = true;
1084 static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
1086 struct iwl_mvm_tcm_iter_data data = {
1091 mutex_lock(&mvm->mutex);
1093 ieee80211_iterate_active_interfaces(
1094 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1095 iwl_mvm_tcm_iter, &data);
1097 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
1098 iwl_mvm_config_scan(mvm);
1100 mutex_unlock(&mvm->mutex);
1103 static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
1105 struct iwl_mvm *mvm;
1106 struct iwl_mvm_vif *mvmvif;
1107 struct ieee80211_vif *vif;
1109 mvmvif = container_of(wk, struct iwl_mvm_vif,
1110 uapsd_nonagg_detected_wk.work);
1111 vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
1114 if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
1117 /* remember that this AP is broken */
1118 memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
1119 vif->bss_conf.bssid, ETH_ALEN);
1120 mvm->uapsd_noagg_bssid_write_idx++;
1121 if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
1122 mvm->uapsd_noagg_bssid_write_idx = 0;
1124 iwl_mvm_connection_loss(mvm, vif,
1125 "AP isn't using AMPDU with uAPSD enabled");
1128 static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
1129 struct ieee80211_vif *vif)
1131 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1133 if (vif->type != NL80211_IFTYPE_STATION)
1136 if (!vif->bss_conf.assoc)
1139 if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
1140 !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
1141 !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
1142 !mvmvif->queue_params[IEEE80211_AC_BK].uapsd)
1145 if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
1148 mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
1150 "detected AP should do aggregation but isn't, likely due to U-APSD\n");
1151 schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ);
1154 static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
1155 unsigned int elapsed,
1158 u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
1161 struct ieee80211_vif *vif;
1163 rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
1165 if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
1166 mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
1169 if (iwl_mvm_has_new_rx_api(mvm)) {
1170 tpt = 8 * bytes; /* kbps */
1171 do_div(tpt, elapsed);
1172 rate *= 1000; /* kbps */
1173 if (tpt < 22 * rate / 100)
1177 * the rate here is actually the threshold, in 100Kbps units,
1178 * so do the needed conversion from bytes to 100Kbps:
1179 * 100kb = bits / (100 * 1000),
1180 * 100kbps = 100kb / (msecs / 1000) ==
1181 * (bits / (100 * 1000)) / (msecs / 1000) ==
1182 * bits / (100 * msecs)
1185 do_div(tpt, elapsed * 100);
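/*
 * Worked example of the conversion above (illustrative): 1250000 bytes
 * received over elapsed == 5000 msec gives 8 * 1250000 == 10000000 bits,
 * and 10000000 / (100 * 5000) == 20, i.e. 20 units of 100 kbps (2 Mbps),
 * which is then compared against the rate threshold read from the EWMA
 * above.
 */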
1191 vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
1193 iwl_mvm_uapsd_agg_disconnect(mvm, vif);
1197 static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
1198 struct ieee80211_vif *vif)
1200 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1203 if (!mvmvif->phy_ctxt)
1206 band[mvmvif->id] = mvmvif->phy_ctxt->channel->band;
1209 static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
1213 unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
1214 unsigned int uapsd_elapsed =
1215 jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
1216 u32 total_airtime = 0;
1217 u32 band_airtime[NUM_NL80211_BANDS] = {0};
1218 u32 band[NUM_MAC_INDEX_DRIVER] = {0};
1220 bool low_latency = false;
1221 enum iwl_mvm_traffic_load load, band_load;
1222 bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
1225 mvm->tcm.ll_ts = ts;
1227 mvm->tcm.uapsd_nonagg_ts = ts;
1229 mvm->tcm.result.elapsed = elapsed;
1231 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1232 IEEE80211_IFACE_ITER_NORMAL,
1233 iwl_mvm_tcm_iterator,
1236 for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
1237 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
1239 u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
1241 total_airtime += airtime;
1242 band_airtime[band[mac]] += airtime;
1244 load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
1245 mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
1246 mvm->tcm.result.load[mac] = load;
1247 mvm->tcm.result.airtime[mac] = airtime;
1249 for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
1250 vo_vi_pkts += mdata->rx.pkts[ac] +
1253 /* enable immediately with enough packets but defer disabling */
1254 if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
1255 mvm->tcm.result.low_latency[mac] = true;
1257 mvm->tcm.result.low_latency[mac] = false;
1260 /* clear old data */
1261 memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
1262 memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
1264 low_latency |= mvm->tcm.result.low_latency[mac];
1266 if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
1267 iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
1269 /* clear old data */
1271 mdata->uapsd_nonagg_detect.rx_bytes = 0;
1272 memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
1273 memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
1276 load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
1277 mvm->tcm.result.global_change = load != mvm->tcm.result.global_load;
1278 mvm->tcm.result.global_load = load;
1280 for (i = 0; i < NUM_NL80211_BANDS; i++) {
1281 band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
1282 mvm->tcm.result.band_load[i] = band_load;
1286 * If the current load isn't low we need to force re-evaluation
1287 * in the TCM period, so that we can return to low load if there
1288 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
1289 * triggered by traffic).
1291 if (load != IWL_MVM_TRAFFIC_LOW)
1292 return MVM_TCM_PERIOD;
1294 * If low-latency is active we need to force re-evaluation after
1295 * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
1296 * when there's no traffic at all.
1299 return MVM_LL_PERIOD;
1301 * Otherwise, we don't need to run the work struct because we're
1302 * in the default "idle" state - traffic indication is low (which
1303 * also covers the "no traffic" case) and low-latency is disabled
1304 * so there's no state that may need to be disabled when there's
1305 * no traffic at all.
1307 * Note that this has no impact on the regular scheduling of the
1308 * updates triggered by traffic - those happen whenever one of the
1309 * two timeouts expire (if there's traffic at all.)
1314 void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
1316 unsigned long ts = jiffies;
1318 time_after(ts, mvm->tcm.uapsd_nonagg_ts +
1319 msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));
1321 spin_lock(&mvm->tcm.lock);
1322 if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
1323 spin_unlock(&mvm->tcm.lock);
1326 spin_unlock(&mvm->tcm.lock);
1328 if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
1329 mutex_lock(&mvm->mutex);
1330 if (iwl_mvm_request_statistics(mvm, true))
1331 handle_uapsd = false;
1332 mutex_unlock(&mvm->mutex);
1335 spin_lock(&mvm->tcm.lock);
1336 /* re-check if somebody else won the recheck race */
1337 if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
1338 /* calculate statistics */
1339 unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
1342 /* the memset needs to be visible before the timestamp */
1346 schedule_delayed_work(&mvm->tcm.work, work_delay);
1348 spin_unlock(&mvm->tcm.lock);
1350 iwl_mvm_tcm_results(mvm);
1353 void iwl_mvm_tcm_work(struct work_struct *work)
1355 struct delayed_work *delayed_work = to_delayed_work(work);
1356 struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
1359 iwl_mvm_recalc_tcm(mvm);
1362 void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
1364 spin_lock_bh(&mvm->tcm.lock);
1365 mvm->tcm.paused = true;
1366 spin_unlock_bh(&mvm->tcm.lock);
1368 cancel_delayed_work_sync(&mvm->tcm.work);
1371 void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
1374 bool low_latency = false;
1376 spin_lock_bh(&mvm->tcm.lock);
1377 mvm->tcm.ts = jiffies;
1378 mvm->tcm.ll_ts = jiffies;
1379 for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
1380 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
1382 memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
1383 memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
1384 memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
1385 memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
1387 if (mvm->tcm.result.low_latency[mac])
1390 /* The TCM data needs to be reset before "paused" flag changes */
1392 mvm->tcm.paused = false;
1395 * if the current load is not low or low latency is active, force
1396 * re-evaluation to cover the case of no traffic.
1398 if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
1399 schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
1400 else if (low_latency)
1401 schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);
1403 spin_unlock_bh(&mvm->tcm.lock);
1406 void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1408 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1410 INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
1411 iwl_mvm_tcm_uapsd_nonagg_detected_wk);
1414 void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1416 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1418 cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
1421 u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
1423 u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
1425 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
1426 mvm->trans->cfg->gp2_reg_addr)
1427 reg_addr = mvm->trans->cfg->gp2_reg_addr;
1429 return iwl_read_prph(mvm->trans, reg_addr);
1432 void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
1436 lockdep_assert_held(&mvm->mutex);
1438 /* Disable power save when reading GP2 */
1439 ps_disabled = mvm->ps_disabled;
1441 mvm->ps_disabled = true;
1442 iwl_mvm_power_update_device(mvm);
1445 *gp2 = iwl_mvm_get_systime(mvm);
1446 *boottime = ktime_get_boot_ns();
1449 mvm->ps_disabled = ps_disabled;
1450 iwl_mvm_power_update_device(mvm);