wifi: rtw88: refine register based H2C command
[linux-2.6-microblaze.git] / drivers / net / wireless / realtek / rtw88 / fw.c
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2018-2019  Realtek Corporation
3  */
4
5 #include <linux/iopoll.h>
6
7 #include "main.h"
8 #include "coex.h"
9 #include "fw.h"
10 #include "tx.h"
11 #include "reg.h"
12 #include "sec.h"
13 #include "debug.h"
14 #include "util.h"
15 #include "wow.h"
16 #include "ps.h"
17 #include "phy.h"
18 #include "mac.h"
19
20 static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
21                                       struct sk_buff *skb)
22 {
23         struct rtw_c2h_cmd *c2h;
24         u8 sub_cmd_id;
25
26         c2h = get_c2h_from_skb(skb);
27         sub_cmd_id = c2h->payload[0];
28
29         switch (sub_cmd_id) {
30         case C2H_CCX_RPT:
31                 rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT);
32                 break;
33         case C2H_SCAN_STATUS_RPT:
34                 rtw_hw_scan_status_report(rtwdev, skb);
35                 break;
36         case C2H_CHAN_SWITCH:
37                 rtw_hw_scan_chan_switch(rtwdev, skb);
38                 break;
39         default:
40                 break;
41         }
42 }
43
44 static u16 get_max_amsdu_len(u32 bit_rate)
45 {
46         /* lower than ofdm, do not aggregate */
47         if (bit_rate < 550)
48                 return 1;
49
50         /* lower than 20M 2ss mcs8, make it small */
51         if (bit_rate < 1800)
52                 return 1200;
53
54         /* lower than 40M 2ss mcs9, make it medium */
55         if (bit_rate < 4000)
56                 return 2600;
57
58         /* not yet 80M 2ss mcs8/9, make it twice regular packet size */
59         if (bit_rate < 7000)
60                 return 3500;
61
62         /* unlimited */
63         return 0;
64 }
65
/* Context handed to rtw_fw_ra_report_iter() while iterating stations. */
struct rtw_fw_iter_ra_data {
        struct rtw_dev *rtwdev;
        u8 *payload;    /* raw C2H_RA_RPT payload */
};
70
71 static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta)
72 {
73         struct rtw_fw_iter_ra_data *ra_data = data;
74         struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
75         u8 mac_id, rate, sgi, bw;
76         u8 mcs, nss;
77         u32 bit_rate;
78
79         mac_id = GET_RA_REPORT_MACID(ra_data->payload);
80         if (si->mac_id != mac_id)
81                 return;
82
83         si->ra_report.txrate.flags = 0;
84
85         rate = GET_RA_REPORT_RATE(ra_data->payload);
86         sgi = GET_RA_REPORT_SGI(ra_data->payload);
87         bw = GET_RA_REPORT_BW(ra_data->payload);
88
89         if (rate < DESC_RATEMCS0) {
90                 si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate);
91                 goto legacy;
92         }
93
94         rtw_desc_to_mcsrate(rate, &mcs, &nss);
95         if (rate >= DESC_RATEVHT1SS_MCS0)
96                 si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
97         else if (rate >= DESC_RATEMCS0)
98                 si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS;
99
100         if (rate >= DESC_RATEMCS0) {
101                 si->ra_report.txrate.mcs = mcs;
102                 si->ra_report.txrate.nss = nss;
103         }
104
105         if (sgi)
106                 si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
107
108         if (bw == RTW_CHANNEL_WIDTH_80)
109                 si->ra_report.txrate.bw = RATE_INFO_BW_80;
110         else if (bw == RTW_CHANNEL_WIDTH_40)
111                 si->ra_report.txrate.bw = RATE_INFO_BW_40;
112         else
113                 si->ra_report.txrate.bw = RATE_INFO_BW_20;
114
115 legacy:
116         bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate);
117
118         si->ra_report.desc_rate = rate;
119         si->ra_report.bit_rate = bit_rate;
120
121         sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
122 }
123
124 static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
125                                     u8 length)
126 {
127         struct rtw_fw_iter_ra_data ra_data;
128
129         if (WARN(length < 7, "invalid ra report c2h length\n"))
130                 return;
131
132         rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload);
133         ra_data.rtwdev = rtwdev;
134         ra_data.payload = payload;
135         rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
136 }
137
/* Context handed to rtw_fw_bcn_filter_notify_vif_iter() while iterating vifs. */
struct rtw_beacon_filter_iter_data {
        struct rtw_dev *rtwdev;
        u8 *payload;    /* raw C2H_BCN_FILTER_NOTIFY payload */
};
142
143 static void rtw_fw_bcn_filter_notify_vif_iter(void *data, u8 *mac,
144                                               struct ieee80211_vif *vif)
145 {
146         struct rtw_beacon_filter_iter_data *iter_data = data;
147         struct rtw_dev *rtwdev = iter_data->rtwdev;
148         u8 *payload = iter_data->payload;
149         u8 type = GET_BCN_FILTER_NOTIFY_TYPE(payload);
150         u8 event = GET_BCN_FILTER_NOTIFY_EVENT(payload);
151         s8 sig = (s8)GET_BCN_FILTER_NOTIFY_RSSI(payload);
152
153         switch (type) {
154         case BCN_FILTER_NOTIFY_SIGNAL_CHANGE:
155                 event = event ? NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
156                         NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
157                 ieee80211_cqm_rssi_notify(vif, event, sig, GFP_KERNEL);
158                 break;
159         case BCN_FILTER_CONNECTION_LOSS:
160                 ieee80211_connection_loss(vif);
161                 break;
162         case BCN_FILTER_CONNECTED:
163                 rtwdev->beacon_loss = false;
164                 break;
165         case BCN_FILTER_NOTIFY_BEACON_LOSS:
166                 rtwdev->beacon_loss = true;
167                 rtw_leave_lps(rtwdev);
168                 break;
169         }
170 }
171
172 static void rtw_fw_bcn_filter_notify(struct rtw_dev *rtwdev, u8 *payload,
173                                      u8 length)
174 {
175         struct rtw_beacon_filter_iter_data dev_iter_data;
176
177         dev_iter_data.rtwdev = rtwdev;
178         dev_iter_data.payload = payload;
179         rtw_iterate_vifs(rtwdev, rtw_fw_bcn_filter_notify_vif_iter,
180                          &dev_iter_data);
181 }
182
183 static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload,
184                                u8 length)
185 {
186         struct rtw_dm_info *dm_info = &rtwdev->dm_info;
187
188         dm_info->scan_density = payload[0];
189
190         rtw_dbg(rtwdev, RTW_DBG_FW, "scan.density = %x\n",
191                 dm_info->scan_density);
192 }
193
/* C2H_ADAPTIVITY handler: dump the adaptivity state reported by firmware
 * together with the EDCCA thresholds currently programmed in hardware.
 * @payload: C2H payload, laid out as struct rtw_c2h_adaptivity.
 * @length: payload length (unused; the layout is fixed-size).
 */
static void rtw_fw_adaptivity_result(struct rtw_dev *rtwdev, u8 *payload,
                                     u8 length)
{
        struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
        struct rtw_c2h_adaptivity *result = (struct rtw_c2h_adaptivity *)payload;

        rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
                "Adaptivity: density %x igi %x l2h_th_init %x l2h %x h2l %x option %x\n",
                result->density, result->igi, result->l2h_th_init, result->l2h,
                result->h2l, result->option);

        /* Read back the L2H/H2L threshold fields actually in hardware. */
        rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Reg Setting: L2H %x H2L %x\n",
                rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
                                edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask),
                rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
                                edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask));

        rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "EDCCA Flag %s\n",
                rtw_read32_mask(rtwdev, REG_EDCCA_REPORT, BIT_EDCCA_FLAG) ?
                "Set" : "Unset");
}
215
/* Deferred (process-context) C2H dispatcher, run from the c2h work queue.
 * The packet offset stashed in skb->cb by rtw_fw_c2h_cmd_rx_irqsafe() is
 * used to relocate the C2H header inside the skb. Events are dropped once
 * the device is no longer running. Ownership of the skb follows the called
 * handler (e.g. rtw_tx_report_handle consumes it); the skb is not freed
 * here directly.
 */
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
        struct rtw_c2h_cmd *c2h;
        u32 pkt_offset;
        u8 len;

        pkt_offset = *((u32 *)skb->cb);
        c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
        /* Payload length: total minus offset minus the 2-byte id/seq header. */
        len = skb->len - pkt_offset - 2;

        mutex_lock(&rtwdev->mutex);

        if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
                goto unlock;

        switch (c2h->id) {
        case C2H_CCX_TX_RPT:
                rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT);
                break;
        case C2H_BT_INFO:
                rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
                break;
        case C2H_BT_HID_INFO:
                rtw_coex_bt_hid_info_notify(rtwdev, c2h->payload, len);
                break;
        case C2H_WLAN_INFO:
                rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
                break;
        case C2H_BCN_FILTER_NOTIFY:
                rtw_fw_bcn_filter_notify(rtwdev, c2h->payload, len);
                break;
        case C2H_HALMAC:
                rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
                break;
        case C2H_RA_RPT:
                rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
                break;
        default:
                rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id);
                break;
        }

unlock:
        mutex_unlock(&rtwdev->mutex);
}
261
262 void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
263                                struct sk_buff *skb)
264 {
265         struct rtw_c2h_cmd *c2h;
266         u8 len;
267
268         c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
269         len = skb->len - pkt_offset - 2;
270         *((u32 *)skb->cb) = pkt_offset;
271
272         rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
273                 c2h->id, c2h->seq, len);
274
275         switch (c2h->id) {
276         case C2H_BT_MP_INFO:
277                 rtw_coex_info_response(rtwdev, skb);
278                 break;
279         case C2H_WLAN_RFON:
280                 complete(&rtwdev->lps_leave_check);
281                 dev_kfree_skb_any(skb);
282                 break;
283         case C2H_SCAN_RESULT:
284                 complete(&rtwdev->fw_scan_density);
285                 rtw_fw_scan_result(rtwdev, c2h->payload, len);
286                 dev_kfree_skb_any(skb);
287                 break;
288         case C2H_ADAPTIVITY:
289                 rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
290                 dev_kfree_skb_any(skb);
291                 break;
292         default:
293                 /* pass offset for further operation */
294                 *((u32 *)skb->cb) = pkt_offset;
295                 skb_queue_tail(&rtwdev->c2h_queue, skb);
296                 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
297                 break;
298         }
299 }
300 EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);
301
302 void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev)
303 {
304         if (rtw_read8(rtwdev, REG_MCU_TST_CFG) == VAL_FW_TRIGGER)
305                 rtw_fw_recovery(rtwdev);
306         else
307                 rtw_warn(rtwdev, "unhandled firmware c2h interrupt\n");
308 }
309 EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr);
310
/* Send a register-based H2C command (two 32-bit words) through one of the
 * four HMEBOX mailboxes, used round-robin via h2c.last_box_num. Waits for
 * the selected box's busy bit in REG_HMETFR to clear before writing.
 * Must be called with rtwdev->mutex held.
 */
static void rtw_fw_send_h2c_command_register(struct rtw_dev *rtwdev,
                                             struct rtw_h2c_register *h2c)
{
        u32 box_reg, box_ex_reg;
        u8 box_state, box;
        int ret;

        rtw_dbg(rtwdev, RTW_DBG_FW, "send H2C content %08x %08x\n", h2c->w0,
                h2c->w1);

        lockdep_assert_held(&rtwdev->mutex);

        box = rtwdev->h2c.last_box_num;
        switch (box) {
        case 0:
                box_reg = REG_HMEBOX0;
                box_ex_reg = REG_HMEBOX0_EX;
                break;
        case 1:
                box_reg = REG_HMEBOX1;
                box_ex_reg = REG_HMEBOX1_EX;
                break;
        case 2:
                box_reg = REG_HMEBOX2;
                box_ex_reg = REG_HMEBOX2_EX;
                break;
        case 3:
                box_reg = REG_HMEBOX3;
                box_ex_reg = REG_HMEBOX3_EX;
                break;
        default:
                WARN(1, "invalid h2c mail box number\n");
                return;
        }

        /* Poll until bit `box` of REG_HMETFR clears (box free), up to 3 ms. */
        ret = read_poll_timeout_atomic(rtw_read8, box_state,
                                       !((box_state >> box) & 0x1), 100, 3000,
                                       false, rtwdev, REG_HMETFR);

        if (ret) {
                rtw_err(rtwdev, "failed to send h2c command\n");
                return;
        }

        /* Write the extension word first; the box_reg write presumably
         * kicks the firmware, so it must come last — keep this order.
         */
        rtw_write32(rtwdev, box_ex_reg, h2c->w1);
        rtw_write32(rtwdev, box_reg, h2c->w0);

        if (++rtwdev->h2c.last_box_num >= 4)
                rtwdev->h2c.last_box_num = 0;
}
361
/* Send an 8-byte H2C command buffer through one of the four HMEBOX
 * mailboxes, used round-robin via h2c.last_box_num. Waits for the selected
 * box's busy bit in REG_HMETFR to clear before writing.
 * Must be called with rtwdev->mutex held.
 * @h2c: 8-byte little-endian command, laid out as struct rtw_h2c_cmd.
 */
static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
                                    u8 *h2c)
{
        struct rtw_h2c_cmd *h2c_cmd = (struct rtw_h2c_cmd *)h2c;
        u8 box;
        u8 box_state;
        u32 box_reg, box_ex_reg;
        int ret;

        rtw_dbg(rtwdev, RTW_DBG_FW,
                "send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
                h2c[3], h2c[2], h2c[1], h2c[0],
                h2c[7], h2c[6], h2c[5], h2c[4]);

        lockdep_assert_held(&rtwdev->mutex);

        box = rtwdev->h2c.last_box_num;
        switch (box) {
        case 0:
                box_reg = REG_HMEBOX0;
                box_ex_reg = REG_HMEBOX0_EX;
                break;
        case 1:
                box_reg = REG_HMEBOX1;
                box_ex_reg = REG_HMEBOX1_EX;
                break;
        case 2:
                box_reg = REG_HMEBOX2;
                box_ex_reg = REG_HMEBOX2_EX;
                break;
        case 3:
                box_reg = REG_HMEBOX3;
                box_ex_reg = REG_HMEBOX3_EX;
                break;
        default:
                WARN(1, "invalid h2c mail box number\n");
                return;
        }

        /* Poll until bit `box` of REG_HMETFR clears (box free), up to 3 ms. */
        ret = read_poll_timeout_atomic(rtw_read8, box_state,
                                       !((box_state >> box) & 0x1), 100, 3000,
                                       false, rtwdev, REG_HMETFR);

        if (ret) {
                rtw_err(rtwdev, "failed to send h2c command\n");
                return;
        }

        /* Extension word first, main word last — same ordering requirement
         * as rtw_fw_send_h2c_command_register().
         */
        rtw_write32(rtwdev, box_ex_reg, le32_to_cpu(h2c_cmd->msg_ext));
        rtw_write32(rtwdev, box_reg, le32_to_cpu(h2c_cmd->msg));

        if (++rtwdev->h2c.last_box_num >= 4)
                rtwdev->h2c.last_box_num = 0;
}
416
/* Debugfs entry point: inject a raw 8-byte H2C command. */
void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c)
{
        rtw_fw_send_h2c_command(rtwdev, h2c);
}
421
/* Send a packet-based (offloaded) H2C of H2C_PKT_SIZE bytes via the HCI
 * data path, tagging it with the driver's running sequence number.
 * Must be called with rtwdev->mutex held. Note the sequence number is
 * advanced even when the write fails, keeping it monotonic.
 */
static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
{
        int ret;

        lockdep_assert_held(&rtwdev->mutex);

        FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq);
        ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE);
        if (ret)
                rtw_err(rtwdev, "failed to send h2c packet\n");
        rtwdev->h2c.seq++;
}
434
435 void
436 rtw_fw_send_general_info(struct rtw_dev *rtwdev)
437 {
438         struct rtw_fifo_conf *fifo = &rtwdev->fifo;
439         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
440         u16 total_size = H2C_PKT_HDR_SIZE + 4;
441
442         if (rtw_chip_wcpu_11n(rtwdev))
443                 return;
444
445         rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);
446
447         SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
448
449         GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt,
450                                         fifo->rsvd_fw_txbuf_addr -
451                                         fifo->rsvd_boundary);
452
453         rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
454 }
455
/* Send the PHYDM_INFO offload H2C with RFE option, RF type, cut version
 * and antenna status. Not supported on 11n wifi-CPU chips.
 */
void
rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
{
        struct rtw_hal *hal = &rtwdev->hal;
        struct rtw_efuse *efuse = &rtwdev->efuse;
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        u16 total_size = H2C_PKT_HDR_SIZE + 8;
        u8 fw_rf_type = 0;

        if (rtw_chip_wcpu_11n(rtwdev))
                return;

        if (hal->rf_type == RF_1T1R)
                fw_rf_type = FW_RF_1T1R;
        else if (hal->rf_type == RF_2T2R)
                fw_rf_type = FW_RF_2T2R;

        rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO);

        SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
        PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option);
        PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type);
        PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version);
        /* NOTE(review): antenna_tx feeds the RX field and antenna_rx the TX
         * field — looks swapped; confirm against the firmware interface
         * before "fixing", as firmware may define these inversely.
         */
        PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx);
        PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx);

        rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
484
/* Ask firmware to run IQ calibration with the given parameters. */
void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        u16 total_size = H2C_PKT_HDR_SIZE + 1;

        rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK);
        SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
        IQK_SET_CLEAR(h2c_pkt, para->clear);
        IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk);

        rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
EXPORT_SYMBOL(rtw_fw_do_iqk);
498
499 void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start)
500 {
501         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
502
503         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WIFI_CALIBRATION);
504
505         RFK_SET_INFORM_START(h2c_pkt, start);
506
507         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
508 }
509 EXPORT_SYMBOL(rtw_fw_inform_rfk_status);
510
511 void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
512 {
513         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
514
515         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO);
516
517         SET_QUERY_BT_INFO(h2c_pkt, true);
518
519         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
520 }
521
522 void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw)
523 {
524         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
525
526         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO);
527
528         SET_WL_CH_INFO_LINK(h2c_pkt, link);
529         SET_WL_CH_INFO_CHNL(h2c_pkt, ch);
530         SET_WL_CH_INFO_BW(h2c_pkt, bw);
531
532         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
533 }
534
535 void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
536                              struct rtw_coex_info_req *req)
537 {
538         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
539
540         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO);
541
542         SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq);
543         SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code);
544         SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1);
545         SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2);
546         SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3);
547
548         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
549 }
550
/* Force a BT TX power decrement. The index is the two's complement of the
 * decrement level (0 - lvl on a u8), which is the encoding firmware expects.
 */
void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        u8 index = 0 - bt_pwr_dec_lvl;

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER);

        SET_BT_TX_POWER_INDEX(h2c_pkt, index);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
562
563 void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable)
564 {
565         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
566
567         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION);
568
569         SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable);
570
571         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
572 }
573
574 void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
575                            u8 para1, u8 para2, u8 para3, u8 para4, u8 para5)
576 {
577         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
578
579         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE);
580
581         SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1);
582         SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2);
583         SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3);
584         SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4);
585         SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5);
586
587         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
588 }
589
590 void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data)
591 {
592         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
593
594         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_HID_INFO);
595
596         SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, sub_id);
597         SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, data);
598
599         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
600 }
601
602 void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
603 {
604         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
605
606         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL);
607
608         SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code);
609
610         SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data);
611         SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1));
612         SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2));
613         SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3));
614         SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4));
615
616         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
617 }
618
619 void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
620 {
621         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
622         u8 rssi = ewma_rssi_read(&si->avg_rssi);
623         bool stbc_en = si->stbc_en ? true : false;
624
625         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR);
626
627         SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id);
628         SET_RSSI_INFO_RSSI(h2c_pkt, rssi);
629         SET_RSSI_INFO_STBC(h2c_pkt, stbc_en);
630
631         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
632 }
633
634 void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
635                          bool reset_ra_mask)
636 {
637         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
638         bool disable_pt = true;
639
640         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);
641
642         SET_RA_INFO_MACID(h2c_pkt, si->mac_id);
643         SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id);
644         SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
645         SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
646         SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
647         SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en);
648         SET_RA_INFO_NO_UPDATE(h2c_pkt, !reset_ra_mask);
649         SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
650         SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
651         SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
652         SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8);
653         SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16);
654         SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);
655
656         si->init_ra_lv = 0;
657
658         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
659 }
660
661 void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
662 {
663         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
664
665         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT);
666         MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect);
667         MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id);
668
669         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
670 }
671
672 void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev)
673 {
674         struct rtw_traffic_stats *stats = &rtwdev->stats;
675         struct rtw_dm_info *dm_info = &rtwdev->dm_info;
676         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
677
678         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_PHY_INFO);
679         SET_WL_PHY_INFO_TX_TP(h2c_pkt, stats->tx_throughput);
680         SET_WL_PHY_INFO_RX_TP(h2c_pkt, stats->rx_throughput);
681         SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, dm_info->tx_rate);
682         SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, dm_info->curr_rx_rate);
683         SET_WL_PHY_INFO_RX_EVM(h2c_pkt, dm_info->rx_evm_dbm[RF_PATH_A]);
684         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
685 }
686
/* Configure firmware beacon filter offload for @vif.
 * On disconnect, a single P1 command with enable=false turns the filter off.
 * On connect, two commands are sent: P0 carries the BSSID, then P1 carries
 * the CQM threshold (rebased from dBm to 0..100 via rssi_offset and
 * clamped), hysteresis, beacon-loss count, mac_id and beacon interval.
 * No-op if the firmware lacks FW_FEATURE_BCN_FILTER or (when connecting)
 * the station cannot be found.
 */
void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
                                 struct ieee80211_vif *vif)
{
        struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
        struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
        static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100;
        struct rtw_sta_info *si =
                sta ? (struct rtw_sta_info *)sta->drv_priv : NULL;
        s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset;
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};

        if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
                return;

        if (!connect) {
                SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
                SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
                rtw_fw_send_h2c_command(rtwdev, h2c_pkt);

                return;
        }

        if (!si)
                return;

        /* Part 0: hand the BSSID to firmware (bytes 1..6 of the command). */
        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0);
        ether_addr_copy(&h2c_pkt[1], bss_conf->bssid);
        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);

        /* Part 1: reuse the buffer for the filter parameters. */
        memset(h2c_pkt, 0, sizeof(h2c_pkt));
        threshold = clamp_t(s32, threshold, rssi_min, rssi_max);
        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
        SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
        SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt,
                                               BCN_FILTER_OFFLOAD_MODE_DEFAULT);
        SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, (u8)threshold);
        SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT);
        SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id);
        SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, bss_conf->cqm_rssi_hyst);
        SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int);
        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
729
730 void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
731 {
732         struct rtw_lps_conf *conf = &rtwdev->lps_conf;
733         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
734
735         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE);
736
737         SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode);
738         SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm);
739         SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps);
740         SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval);
741         SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id);
742         SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state);
743
744         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
745 }
746
747 void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable)
748 {
749         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
750         struct rtw_fw_wow_keep_alive_para mode = {
751                 .adopt = true,
752                 .pkt_type = KEEP_ALIVE_NULL_PKT,
753                 .period = 5,
754         };
755
756         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE);
757         SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable);
758         SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt);
759         SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type);
760         SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period);
761
762         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
763 }
764
/* Configure the WoWLAN disconnect-decision offload. Note the command is
 * always sent: when RTW_WOW_FLAG_EN_DISCONNECT is not set, the payload
 * fields stay zero, which disables the feature in firmware.
 */
void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable)
{
        struct rtw_wow_param *rtw_wow = &rtwdev->wow;
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        struct rtw_fw_wow_disconnect_para mode = {
                .adopt = true,
                .period = 30,           /* check period */
                .retry_count = 5,       /* probe attempts before deciding */
        };

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION);

        if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
                SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable);
                SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt);
                SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period);
                SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count);
        }

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
786
/* Master WoWLAN control: enable/disable the WoWLAN function and, when a
 * managed link is up, the individual wake sources that were requested
 * (magic packet, deauth, rekey, pattern match).
 */
void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
        struct rtw_wow_param *rtw_wow = &rtwdev->wow;
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN);

        SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable);
        if (rtw_wow_mgd_linked(rtwdev)) {
                if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
                        SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable);
                if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
                        SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable);
                if (test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags))
                        SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable);
                if (rtw_wow->pattern_cnt)
                        SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable);
        }

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
808
809 void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
810                                      u8 pairwise_key_enc,
811                                      u8 group_key_enc)
812 {
813         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
814
815         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO);
816
817         SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc);
818         SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc);
819
820         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
821 }
822
823 void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
824 {
825         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
826
827         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL);
828
829         SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable);
830
831         if (rtw_wow_no_link(rtwdev))
832                 SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable);
833
834         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
835 }
836
/* Return the reserved page index of the last rsvd packet of @type in the
 * build list, or 0 if none is found. The full list is walked deliberately
 * so a later entry of the same type overrides an earlier one.
 */
static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
                                     enum rtw_rsvd_packet_type type)
{
        struct rtw_rsvd_page *rsvd_pkt;
        u8 location = 0;

        list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
                if (type == rsvd_pkt->type)
                        location = rsvd_pkt->page;
        }

        return location;
}
850
/* Send the NLO (network list offload) info H2C command. When enabling,
 * point the firmware at the NLO info reserved page and, if a deep LPS
 * mode is configured, allow NLO operation in the 32K power state.
 */
void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        u8 loc_nlo;

        loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO);

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO);

        SET_NLO_FUN_EN(h2c_pkt, enable);
        if (enable) {
                if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE)
                        SET_NLO_PS_32K(h2c_pkt, enable);
                SET_NLO_IGNORE_SECURITY(h2c_pkt, enable);
                SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo);
        }

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
870
/* Ask the firmware to recover the BT device. Fire-and-forget: no
 * completion or status is awaited here.
 */
void rtw_fw_set_recover_bt_device(struct rtw_dev *rtwdev)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RECOVER_BT_DEV);
        SET_RECOVER_BT_DEV_EN(h2c_pkt, 1);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
880
/* Send the LPS PG info H2C command: tell firmware where the LPS PG
 * info and DPK backup reserved pages live, and whether security CAM /
 * pattern CAM contents were backed up into them (flags recorded by
 * rtw_lps_pg_info_get()).
 */
void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
{
        struct rtw_lps_conf *conf = &rtwdev->lps_conf;
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        u8 loc_pg, loc_dpk;

        loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO);
        loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK);

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO);

        LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
        LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
        LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
        LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
899
900 static u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
901                                                struct cfg80211_ssid *ssid)
902 {
903         struct rtw_rsvd_page *rsvd_pkt;
904         u8 location = 0;
905
906         list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
907                 if (rsvd_pkt->type != RSVD_PROBE_REQ)
908                         continue;
909                 if ((!ssid && !rsvd_pkt->ssid) ||
910                     rtw_ssid_equal(rsvd_pkt->ssid, ssid))
911                         location = rsvd_pkt->page;
912         }
913
914         return location;
915 }
916
917 static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
918                                             struct cfg80211_ssid *ssid)
919 {
920         struct rtw_rsvd_page *rsvd_pkt;
921         u16 size = 0;
922
923         list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
924                 if (rsvd_pkt->type != RSVD_PROBE_REQ)
925                         continue;
926                 if ((!ssid && !rsvd_pkt->ssid) ||
927                     rtw_ssid_equal(rsvd_pkt->ssid, ssid))
928                         size = rsvd_pkt->probe_req_size;
929         }
930
931         return size;
932 }
933
934 void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
935 {
936         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
937         u8 location = 0;
938
939         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE);
940
941         location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP);
942         *(h2c_pkt + 1) = location;
943         rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location);
944
945         location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL);
946         *(h2c_pkt + 2) = location;
947         rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location);
948
949         location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL);
950         *(h2c_pkt + 3) = location;
951         rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location);
952
953         location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL);
954         *(h2c_pkt + 4) = location;
955         rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location);
956
957         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
958 }
959
/* Build the NLO info reserved-page packet for WoWLAN net-detect.
 *
 * Layout: struct rtw_nlo_info_hdr followed by one IEEE80211_MAX_SSID_LEN
 * slot per PNO match set holding the raw SSID bytes. The header records
 * each SSID length and the reserved-page location of the matching probe
 * request packet.
 *
 * Returns the skb (with tx_pkt_desc_sz headroom reserved), or NULL if
 * PNO is not configured or a probe request location is missing.
 */
static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
{
        struct rtw_dev *rtwdev = hw->priv;
        const struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
        struct rtw_nlo_info_hdr *nlo_hdr;
        struct cfg80211_ssid *ssid;
        struct sk_buff *skb;
        u8 *pos, loc;
        u32 size;
        int i;

        if (!pno_req->inited || !pno_req->match_set_cnt)
                return NULL;

        size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt *
                      IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz;

        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb)
                return NULL;

        skb_reserve(skb, chip->tx_pkt_desc_sz);

        nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr));

        nlo_hdr->nlo_count = pno_req->match_set_cnt;
        nlo_hdr->hidden_ap_count = pno_req->match_set_cnt;

        /* pattern check for firmware */
        memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE);

        for (i = 0; i < pno_req->match_set_cnt; i++)
                nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len;

        /* Every match set must already have a probe request reserved
         * page; fail the whole packet if any location is unknown.
         */
        for (i = 0; i < pno_req->match_set_cnt; i++) {
                ssid = &pno_req->match_sets[i].ssid;
                loc  = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
                if (!loc) {
                        rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
                        kfree_skb(skb);
                        return NULL;
                }
                nlo_hdr->location[i] = loc;
        }

        /* Fixed-width SSID slots; unused tail bytes stay zeroed */
        for (i = 0; i < pno_req->match_set_cnt; i++) {
                pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN);
                memcpy(pos, pno_req->match_sets[i].ssid.ssid,
                       pno_req->match_sets[i].ssid.ssid_len);
        }

        return skb;
}
1014
/* Build the channel info reserved-page packet used by firmware to scan
 * during WoWLAN net-detect: one 4-byte entry per PNO channel.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
{
        struct rtw_dev *rtwdev = hw->priv;
        const struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
        struct ieee80211_channel *channels = pno_req->channels;
        struct sk_buff *skb;
        int count =  pno_req->channel_cnt;
        u8 *pos;
        int i = 0;

        skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL);
        if (!skb)
                return NULL;

        skb_reserve(skb, chip->tx_pkt_desc_sz);

        for (i = 0; i < count; i++) {
                pos = skb_put_zero(skb, 4);

                CHSW_INFO_SET_CH(pos, channels[i].hw_value);

                /* Radar channels get action id 0, others 1 --
                 * NOTE(review): action id semantics are firmware-defined
                 * (presumably passive vs. active scan); confirm against
                 * the H2C spec.
                 */
                if (channels[i].flags & IEEE80211_CHAN_RADAR)
                        CHSW_INFO_SET_ACTION_ID(pos, 0);
                else
                        CHSW_INFO_SET_ACTION_ID(pos, 1);
                CHSW_INFO_SET_TIMEOUT(pos, 1);
                CHSW_INFO_SET_PRI_CH_IDX(pos, 1);
                CHSW_INFO_SET_BW(pos, 0);
        }

        return skb;
}
1048
/* Snapshot the current DPK calibration state (channel, path-ok flag,
 * txagc, gs and coefficient table) into a reserved-page packet so the
 * firmware can restore it around LPS PG. Returns NULL on allocation
 * failure.
 */
static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
        struct rtw_dev *rtwdev = hw->priv;
        const struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
        struct rtw_lps_pg_dpk_hdr *dpk_hdr;
        struct sk_buff *skb;
        u32 size;

        size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr);
        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb)
                return NULL;

        skb_reserve(skb, chip->tx_pkt_desc_sz);
        dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr));
        dpk_hdr->dpk_ch = dpk_info->dpk_ch;
        dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0];
        /* Fixed copy sizes (2/4/160 bytes) -- assumed to match the
         * dpk_txagc/dpk_gs/coef array sizes in struct rtw_dpk_info;
         * verify against main.h if those arrays ever change.
         */
        memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2);
        memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4);
        memcpy(dpk_hdr->coef, dpk_info->coef, 160);

        return skb;
}
1073
/* Build the LPS PG info reserved-page packet: driver page count, first
 * active mac id, backed-up security CAM entries and WoW pattern count.
 * Also records in lps_conf whether sec/pattern CAM backups were taken,
 * which rtw_fw_set_pg_info() later reports to firmware.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
        struct rtw_dev *rtwdev = hw->priv;
        const struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_lps_conf *conf = &rtwdev->lps_conf;
        struct rtw_lps_pg_info_hdr *pg_info_hdr;
        struct rtw_wow_param *rtw_wow = &rtwdev->wow;
        struct sk_buff *skb;
        u32 size;

        size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr);
        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb)
                return NULL;

        skb_reserve(skb, chip->tx_pkt_desc_sz);
        pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr));
        pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num;
        /* first allocated mac id is assumed to be the station's own */
        pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
        pg_info_hdr->sec_cam_count =
                rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
        pg_info_hdr->pattern_count = rtw_wow->pattern_cnt;

        conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
        conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0;

        return skb;
}
1102
/* Create the skb holding the frame/content for one reserved page entry.
 *
 * RSVD_DUMMY gets a 1-byte placeholder and needs no vif; every other
 * type requires rsvd_pkt->rtwvif. Side effects: records the beacon TIM
 * offset and the probe request length in rsvd_pkt for later use.
 * Returns NULL on unknown type, missing vif, or allocation failure.
 */
static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
                                             struct rtw_rsvd_page *rsvd_pkt)
{
        struct ieee80211_vif *vif;
        struct rtw_vif *rtwvif;
        struct sk_buff *skb_new;
        struct cfg80211_ssid *ssid;
        u16 tim_offset = 0;

        if (rsvd_pkt->type == RSVD_DUMMY) {
                skb_new = alloc_skb(1, GFP_KERNEL);
                if (!skb_new)
                        return NULL;

                skb_put(skb_new, 1);
                return skb_new;
        }

        rtwvif = rsvd_pkt->rtwvif;
        if (!rtwvif)
                return NULL;

        vif = rtwvif_to_vif(rtwvif);

        switch (rsvd_pkt->type) {
        case RSVD_BEACON:
                skb_new = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL, 0);
                rsvd_pkt->tim_offset = tim_offset;
                break;
        case RSVD_PS_POLL:
                skb_new = ieee80211_pspoll_get(hw, vif);
                break;
        case RSVD_PROBE_RESP:
                skb_new = ieee80211_proberesp_get(hw, vif);
                break;
        case RSVD_NULL:
                skb_new = ieee80211_nullfunc_get(hw, vif, -1, false);
                break;
        case RSVD_QOS_NULL:
                skb_new = ieee80211_nullfunc_get(hw, vif, -1, true);
                break;
        case RSVD_LPS_PG_DPK:
                skb_new = rtw_lps_pg_dpk_get(hw);
                break;
        case RSVD_LPS_PG_INFO:
                skb_new = rtw_lps_pg_info_get(hw);
                break;
        case RSVD_PROBE_REQ:
                /* NULL ssid builds the wildcard (broadcast) probe request */
                ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
                if (ssid)
                        skb_new = ieee80211_probereq_get(hw, vif->addr,
                                                         ssid->ssid,
                                                         ssid->ssid_len, 0);
                else
                        skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
                if (skb_new)
                        rsvd_pkt->probe_req_size = (u16)skb_new->len;
                break;
        case RSVD_NLO_INFO:
                skb_new = rtw_nlo_info_get(hw);
                break;
        case RSVD_CH_INFO:
                skb_new = rtw_cs_channel_info_get(hw);
                break;
        default:
                return NULL;
        }

        if (!skb_new)
                return NULL;

        return skb_new;
}
1176
/* Prepend and fill a TX descriptor for a reserved page packet that
 * needs one; grows the skb head by tx_pkt_desc_sz bytes, so skb->len
 * increases accordingly.
 */
static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
                                    enum rtw_rsvd_packet_type type)
{
        struct rtw_tx_pkt_info pkt_info = {0};
        const struct rtw_chip_info *chip = rtwdev->chip;
        u8 *pkt_desc;

        rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type);
        pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
        memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
        rtw_tx_fill_tx_desc(&pkt_info, skb);
}
1189
1190 static inline u8 rtw_len_to_page(unsigned int len, u8 page_size)
1191 {
1192         return DIV_ROUND_UP(len, page_size);
1193 }
1194
1195 static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
1196                                       u8 page_margin, u32 page, u8 *buf,
1197                                       struct rtw_rsvd_page *rsvd_pkt)
1198 {
1199         struct sk_buff *skb = rsvd_pkt->skb;
1200
1201         if (page >= 1)
1202                 memcpy(buf + page_margin + page_size * (page - 1),
1203                        skb->data, skb->len);
1204         else
1205                 memcpy(buf, skb->data, skb->len);
1206 }
1207
1208 static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
1209                                                  enum rtw_rsvd_packet_type type,
1210                                                  bool txdesc)
1211 {
1212         struct rtw_rsvd_page *rsvd_pkt = NULL;
1213
1214         rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
1215
1216         if (!rsvd_pkt)
1217                 return NULL;
1218
1219         INIT_LIST_HEAD(&rsvd_pkt->vif_list);
1220         INIT_LIST_HEAD(&rsvd_pkt->build_list);
1221         rsvd_pkt->type = type;
1222         rsvd_pkt->add_txdesc = txdesc;
1223
1224         return rsvd_pkt;
1225 }
1226
/* Queue a reserved page descriptor at the tail of the vif's list.
 * Caller must hold rtwdev->mutex.
 */
static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
                                 struct rtw_vif *rtwvif,
                                 struct rtw_rsvd_page *rsvd_pkt)
{
        lockdep_assert_held(&rtwdev->mutex);

        list_add_tail(&rsvd_pkt->vif_list, &rtwvif->rsvd_page_list);
}
1235
1236 static void rtw_add_rsvd_page(struct rtw_dev *rtwdev,
1237                               struct rtw_vif *rtwvif,
1238                               enum rtw_rsvd_packet_type type,
1239                               bool txdesc)
1240 {
1241         struct rtw_rsvd_page *rsvd_pkt;
1242
1243         rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
1244         if (!rsvd_pkt) {
1245                 rtw_err(rtwdev, "failed to alloc rsvd page %d\n", type);
1246                 return;
1247         }
1248
1249         rsvd_pkt->rtwvif = rtwvif;
1250         rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
1251 }
1252
1253 static void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
1254                                         struct rtw_vif *rtwvif,
1255                                         struct cfg80211_ssid *ssid)
1256 {
1257         struct rtw_rsvd_page *rsvd_pkt;
1258
1259         rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
1260         if (!rsvd_pkt) {
1261                 rtw_err(rtwdev, "failed to alloc probe req rsvd page\n");
1262                 return;
1263         }
1264
1265         rsvd_pkt->rtwvif = rtwvif;
1266         rsvd_pkt->ssid = ssid;
1267         rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
1268 }
1269
1270 void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
1271                           struct rtw_vif *rtwvif)
1272 {
1273         struct rtw_rsvd_page *rsvd_pkt, *tmp;
1274
1275         lockdep_assert_held(&rtwdev->mutex);
1276
1277         /* remove all of the rsvd pages for vif */
1278         list_for_each_entry_safe(rsvd_pkt, tmp, &rtwvif->rsvd_page_list,
1279                                  vif_list) {
1280                 list_del(&rsvd_pkt->vif_list);
1281                 if (!list_empty(&rsvd_pkt->build_list))
1282                         list_del(&rsvd_pkt->build_list);
1283                 kfree(rsvd_pkt);
1284         }
1285 }
1286
1287 void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev,
1288                            struct rtw_vif *rtwvif)
1289 {
1290         struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
1291
1292         if (vif->type != NL80211_IFTYPE_AP &&
1293             vif->type != NL80211_IFTYPE_ADHOC &&
1294             vif->type != NL80211_IFTYPE_MESH_POINT) {
1295                 rtw_warn(rtwdev, "Cannot add beacon rsvd page for %d\n",
1296                          vif->type);
1297                 return;
1298         }
1299
1300         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_BEACON, false);
1301 }
1302
1303 void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
1304                            struct rtw_vif *rtwvif)
1305 {
1306         struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
1307         struct rtw_wow_param *rtw_wow = &rtwdev->wow;
1308         struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
1309         struct cfg80211_ssid *ssid;
1310         int i;
1311
1312         if (vif->type != NL80211_IFTYPE_STATION) {
1313                 rtw_warn(rtwdev, "Cannot add PNO rsvd page for %d\n",
1314                          vif->type);
1315                 return;
1316         }
1317
1318         for (i = 0 ; i < rtw_pno_req->match_set_cnt; i++) {
1319                 ssid = &rtw_pno_req->match_sets[i].ssid;
1320                 rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, ssid);
1321         }
1322
1323         rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, NULL);
1324         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NLO_INFO, false);
1325         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_CH_INFO, true);
1326 }
1327
1328 void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
1329                            struct rtw_vif *rtwvif)
1330 {
1331         struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
1332
1333         if (vif->type != NL80211_IFTYPE_STATION) {
1334                 rtw_warn(rtwdev, "Cannot add sta rsvd page for %d\n",
1335                          vif->type);
1336                 return;
1337         }
1338
1339         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_PS_POLL, true);
1340         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_QOS_NULL, true);
1341         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NULL, true);
1342         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_DPK, true);
1343         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_INFO, true);
1344 }
1345
/* Write a buffer into the reserved page area of the TX FIFO starting
 * at page pg_addr, then poll the "beacon valid" bit to confirm the
 * hardware accepted the download.
 *
 * REG_CR and REG_FWHW_TXQ_CTRL are modified temporarily (enable SW
 * beacon, stop beacon-queue download) and restored on exit, with the
 * beacon head pointer reset to the reserved boundary.
 * Caller must hold rtwdev->mutex. Returns 0 or a negative error code.
 */
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
                                u8 *buf, u32 size)
{
        u8 bckp[2];
        u8 val;
        u16 rsvd_pg_head;
        u32 bcn_valid_addr;
        u32 bcn_valid_mask;
        int ret;

        lockdep_assert_held(&rtwdev->mutex);

        if (!size)
                return -EINVAL;

        /* Arm the "beacon valid" flag; register layout differs between
         * 11n wcpu chips and newer parts.
         */
        if (rtw_chip_wcpu_11n(rtwdev)) {
                rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID);
        } else {
                pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
                pg_addr |= BIT_BCN_VALID_V1;
                rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr);
        }

        /* Back up and set SW beacon enable */
        val = rtw_read8(rtwdev, REG_CR + 1);
        bckp[0] = val;
        val |= BIT_ENSWBCN >> 8;
        rtw_write8(rtwdev, REG_CR + 1, val);

        /* Back up and clear beacon-queue download enable */
        val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
        bckp[1] = val;
        val &= ~(BIT_EN_BCNQ_DL >> 16);
        rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);

        ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
        if (ret) {
                rtw_err(rtwdev, "failed to write data to rsvd page\n");
                goto restore;
        }

        if (rtw_chip_wcpu_11n(rtwdev)) {
                bcn_valid_addr = REG_DWBCN0_CTRL;
                bcn_valid_mask = BIT_BCN_VALID;
        } else {
                bcn_valid_addr = REG_FIFOPAGE_CTRL_2;
                bcn_valid_mask = BIT_BCN_VALID_V1;
        }

        if (!check_hw_ready(rtwdev, bcn_valid_addr, bcn_valid_mask, 1)) {
                rtw_err(rtwdev, "error beacon valid\n");
                ret = -EBUSY;
        }

restore:
        /* NOTE(review): the restore path writes the V1 head register
         * unconditionally, even on 11n wcpu chips -- confirm intended.
         */
        rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
        rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
                    rsvd_pg_head | BIT_BCN_VALID_V1);
        rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
        rtw_write8(rtwdev, REG_CR + 1, bckp[0]);

        return ret;
}
1407
1408 static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
1409 {
1410         u32 pg_size;
1411         u32 pg_num = 0;
1412         u16 pg_addr = 0;
1413
1414         pg_size = rtwdev->chip->page_size;
1415         pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0);
1416         if (pg_num > rtwdev->fifo.rsvd_drv_pg_num)
1417                 return -ENOMEM;
1418
1419         pg_addr = rtwdev->fifo.rsvd_drv_addr;
1420
1421         return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
1422 }
1423
1424 static void __rtw_build_rsvd_page_reset(struct rtw_dev *rtwdev)
1425 {
1426         struct rtw_rsvd_page *rsvd_pkt, *tmp;
1427
1428         list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
1429                                  build_list) {
1430                 list_del_init(&rsvd_pkt->build_list);
1431
1432                 /* Don't free except for the dummy rsvd page,
1433                  * others will be freed when removing vif
1434                  */
1435                 if (rsvd_pkt->type == RSVD_DUMMY)
1436                         kfree(rsvd_pkt);
1437         }
1438 }
1439
1440 static void rtw_build_rsvd_page_iter(void *data, u8 *mac,
1441                                      struct ieee80211_vif *vif)
1442 {
1443         struct rtw_dev *rtwdev = data;
1444         struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
1445         struct rtw_rsvd_page *rsvd_pkt;
1446
1447         /* AP not yet started, don't gather its rsvd pages */
1448         if (vif->type == NL80211_IFTYPE_AP && !rtwdev->ap_active)
1449                 return;
1450
1451         list_for_each_entry(rsvd_pkt, &rtwvif->rsvd_page_list, vif_list) {
1452                 if (rsvd_pkt->type == RSVD_BEACON)
1453                         list_add(&rsvd_pkt->build_list,
1454                                  &rtwdev->rsvd_page_list);
1455                 else
1456                         list_add_tail(&rsvd_pkt->build_list,
1457                                       &rtwdev->rsvd_page_list);
1458         }
1459 }
1460
/* Rebuild rtwdev->rsvd_page_list from all vifs' reserved pages and
 * guarantee the first entry is a beacon, inserting a dummy placeholder
 * page when no vif provides one (e.g. station-only configurations).
 * Returns 0 on success or a negative error code.
 */
static int  __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev)
{
        struct rtw_rsvd_page *rsvd_pkt;

        __rtw_build_rsvd_page_reset(rtwdev);

        /* gather rsvd page from vifs */
        rtw_iterate_vifs_atomic(rtwdev, rtw_build_rsvd_page_iter, rtwdev);

        rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
                                            struct rtw_rsvd_page, build_list);
        if (!rsvd_pkt) {
                WARN(1, "Should not have an empty reserved page\n");
                return -EINVAL;
        }

        /* the first rsvd should be beacon, otherwise add a dummy one */
        if (rsvd_pkt->type != RSVD_BEACON) {
                struct rtw_rsvd_page *dummy_pkt;

                dummy_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_DUMMY, false);
                if (!dummy_pkt) {
                        rtw_err(rtwdev, "failed to alloc dummy rsvd page\n");
                        return -ENOMEM;
                }

                list_add(&dummy_pkt->build_list, &rtwdev->rsvd_page_list);
        }

        return 0;
}
1492
/* Build the flat memory buffer holding every reserved page packet,
 * aligned to hardware page boundaries as firmware expects.
 *
 * On success *size is set to the buffer length and a kzalloc'd pointer
 * is returned (caller frees); all per-packet skbs are consumed either
 * way. Returns NULL on any failure.
 */
static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
{
        struct ieee80211_hw *hw = rtwdev->hw;
        const struct rtw_chip_info *chip = rtwdev->chip;
        struct sk_buff *iter;
        struct rtw_rsvd_page *rsvd_pkt;
        u32 page = 0;
        u8 total_page = 0;
        u8 page_size, page_margin, tx_desc_sz;
        u8 *buf;
        int ret;

        page_size = chip->page_size;
        tx_desc_sz = chip->tx_pkt_desc_sz;
        /* usable bytes in the first page once the TX path's descriptor
         * is accounted for
         */
        page_margin = page_size - tx_desc_sz;

        ret = __rtw_build_rsvd_page_from_vifs(rtwdev);
        if (ret) {
                rtw_err(rtwdev,
                        "failed to build rsvd page from vifs, ret %d\n", ret);
                return NULL;
        }

        list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
                iter = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
                if (!iter) {
                        rtw_err(rtwdev, "failed to build rsvd packet\n");
                        goto release_skb;
                }

                /* Fill the tx_desc for the rsvd pkt that requires one.
                 * And iter->len will be added with size of tx_desc_sz.
                 */
                if (rsvd_pkt->add_txdesc)
                        rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type);

                rsvd_pkt->skb = iter;
                rsvd_pkt->page = total_page;

                /* Reserved page is downloaded via TX path, and TX path will
                 * generate a tx_desc at the header to describe length of
                 * the buffer. If we are not counting page numbers with the
                 * size of tx_desc added at the first rsvd_pkt (usually a
                 * beacon, firmware default refer to the first page as the
                 * content of beacon), we could generate a buffer which size
                 * is smaller than the actual size of the whole rsvd_page
                 */
                if (total_page == 0) {
                        if (rsvd_pkt->type != RSVD_BEACON &&
                            rsvd_pkt->type != RSVD_DUMMY) {
                                rtw_err(rtwdev, "first page should be a beacon\n");
                                goto release_skb;
                        }
                        total_page += rtw_len_to_page(iter->len + tx_desc_sz,
                                                      page_size);
                } else {
                        total_page += rtw_len_to_page(iter->len, page_size);
                }
        }

        if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
                rtw_err(rtwdev, "rsvd page over size: %d\n", total_page);
                goto release_skb;
        }

        /* first page holds page_margin bytes; remaining pages are full */
        *size = (total_page - 1) * page_size + page_margin;
        buf = kzalloc(*size, GFP_KERNEL);
        if (!buf)
                goto release_skb;

        /* Copy the content of each rsvd_pkt to the buf, and they should
         * be aligned to the pages.
         *
         * Note that the first rsvd_pkt is a beacon no matter what vif->type.
         * And that rsvd_pkt does not require tx_desc because when it goes
         * through TX path, the TX path will generate one for it.
         */
        list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
                rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
                                          page, buf, rsvd_pkt);
                if (page == 0)
                        page += rtw_len_to_page(rsvd_pkt->skb->len +
                                                tx_desc_sz, page_size);
                else
                        page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);

                kfree_skb(rsvd_pkt->skb);
                rsvd_pkt->skb = NULL;
        }

        return buf;

release_skb:
        /* free any skbs built so far; entries not yet built hold NULL */
        list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
                kfree_skb(rsvd_pkt->skb);
                rsvd_pkt->skb = NULL;
        }

        return NULL;
}
1593
/* Re-download only the first reserved page entry (a beacon or the
 * dummy placeholder) so that the TX descriptor the TX path generates
 * describes just this frame rather than the whole reserved page buffer
 * (see the comment in rtw_fw_download_rsvd_page()).
 * Returns 0 or a negative error code.
 */
static int rtw_download_beacon(struct rtw_dev *rtwdev)
{
        struct ieee80211_hw *hw = rtwdev->hw;
        struct rtw_rsvd_page *rsvd_pkt;
        struct sk_buff *skb;
        int ret = 0;

        rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
                                            struct rtw_rsvd_page, build_list);
        if (!rsvd_pkt) {
                rtw_err(rtwdev, "failed to get rsvd page from build list\n");
                return -ENOENT;
        }

        if (rsvd_pkt->type != RSVD_BEACON &&
            rsvd_pkt->type != RSVD_DUMMY) {
                rtw_err(rtwdev, "invalid rsvd page type %d, should be beacon or dummy\n",
                        rsvd_pkt->type);
                return -EINVAL;
        }

        /* build a fresh skb; the one built earlier was already freed */
        skb = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
        if (!skb) {
                rtw_err(rtwdev, "failed to get beacon skb\n");
                return -ENOMEM;
        }

        ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
        if (ret)
                rtw_err(rtwdev, "failed to download drv rsvd page\n");

        dev_kfree_skb(skb);

        return ret;
}
1629
1630 int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev)
1631 {
1632         u8 *buf;
1633         u32 size;
1634         int ret;
1635
1636         buf = rtw_build_rsvd_page(rtwdev, &size);
1637         if (!buf) {
1638                 rtw_err(rtwdev, "failed to build rsvd page pkt\n");
1639                 return -ENOMEM;
1640         }
1641
1642         ret = rtw_download_drv_rsvd_page(rtwdev, buf, size);
1643         if (ret) {
1644                 rtw_err(rtwdev, "failed to download drv rsvd page\n");
1645                 goto free;
1646         }
1647
1648         /* The last thing is to download the *ONLY* beacon again, because
1649          * the previous tx_desc is to describe the total rsvd page. Download
1650          * the beacon again to replace the TX desc header, and we will get
1651          * a correct tx_desc for the beacon in the rsvd page.
1652          */
1653         ret = rtw_download_beacon(rtwdev);
1654         if (ret) {
1655                 rtw_err(rtwdev, "failed to download beacon\n");
1656                 goto free;
1657         }
1658
1659 free:
1660         kfree(buf);
1661
1662         return ret;
1663 }
1664
/* Deferred work: rebuild/re-download the reserved pages (beacon
 * content) and re-send their locations to firmware, under the device
 * mutex.
 */
void rtw_fw_update_beacon_work(struct work_struct *work)
{
        struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
                                              update_beacon_work);

        mutex_lock(&rtwdev->mutex);
        rtw_fw_download_rsvd_page(rtwdev);
        rtw_send_rsvd_page_h2c(rtwdev);
        mutex_unlock(&rtwdev->mutex);
}
1675
/* Dump `size` bytes from the packet buffer through the debug read
 * port, one FIFO page at a time. `residue` is the byte offset into the
 * first page; `start_pg` selects the first page via
 * REG_PKTBUF_DBG_CTRL (whose upper nibble is preserved). RX clock
 * gating is disabled while reading so the buffer is accessible.
 *
 * NOTE(review): assumes size is a non-zero multiple of 4 -- the exit
 * condition is `size == 0` after 4-byte decrements.
 */
static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
                                  u32 *buf, u32 residue, u16 start_pg)
{
        u32 i;
        u16 idx = 0;
        u16 ctl;

        ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
        /* disable rx clock gate */
        rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK);

        do {
                rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);

                for (i = FIFO_DUMP_ADDR + residue;
                     i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) {
                        buf[idx++] = rtw_read32(rtwdev, i);
                        size -= 4;
                        if (size == 0)
                                goto out;
                }

                /* only the first page starts mid-page */
                residue = 0;
                start_pg++;
        } while (size);

out:
        rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
        /* restore rx clock gate */
        rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK);
}
1707
1708 static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
1709                              u32 offset, u32 size, u32 *buf)
1710 {
1711         const struct rtw_chip_info *chip = rtwdev->chip;
1712         u32 start_pg, residue;
1713
1714         if (sel >= RTW_FW_FIFO_MAX) {
1715                 rtw_dbg(rtwdev, RTW_DBG_FW, "wrong fw fifo sel\n");
1716                 return;
1717         }
1718         if (sel == RTW_FW_FIFO_SEL_RSVD_PAGE)
1719                 offset += rtwdev->fifo.rsvd_boundary << TX_PAGE_SIZE_SHIFT;
1720         residue = offset & (FIFO_PAGE_SIZE - 1);
1721         start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + chip->fw_fifo_addr[sel];
1722
1723         rtw_fw_read_fifo_page(rtwdev, offset, size, buf, residue, start_pg);
1724 }
1725
1726 static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev,
1727                                    enum rtw_fw_fifo_sel sel,
1728                                    u32 start_addr, u32 size)
1729 {
1730         switch (sel) {
1731         case RTW_FW_FIFO_SEL_TX:
1732         case RTW_FW_FIFO_SEL_RX:
1733                 if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel])
1734                         return false;
1735                 fallthrough;
1736         default:
1737                 return true;
1738         }
1739 }
1740
1741 int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
1742                      u32 *buffer)
1743 {
1744         if (!rtwdev->chip->fw_fifo_addr[0]) {
1745                 rtw_dbg(rtwdev, RTW_DBG_FW, "chip not support dump fw fifo\n");
1746                 return -ENOTSUPP;
1747         }
1748
1749         if (size == 0 || !buffer)
1750                 return -EINVAL;
1751
1752         if (size & 0x3) {
1753                 rtw_dbg(rtwdev, RTW_DBG_FW, "not 4byte alignment\n");
1754                 return -EINVAL;
1755         }
1756
1757         if (!rtw_fw_dump_check_size(rtwdev, fifo_sel, addr, size)) {
1758                 rtw_dbg(rtwdev, RTW_DBG_FW, "fw fifo dump size overflow\n");
1759                 return -EINVAL;
1760         }
1761
1762         rtw_fw_read_fifo(rtwdev, fifo_sel, addr, size, buffer);
1763
1764         return 0;
1765 }
1766
/* Tell the firmware that the packet identified by @pkt_id now lives at
 * reserved page @location with @size bytes of payload. The chip's TX
 * descriptor size is added to @size here before it is sent.
 */
static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
				u8 location)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id);
	UPDATE_PKT_SET_LOCATION(h2c_pkt, location);

	/* include txdesc size */
	size += chip->tx_pkt_desc_sz;
	UPDATE_PKT_SET_SIZE(h2c_pkt, size);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
1786
1787 void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
1788                                  struct cfg80211_ssid *ssid)
1789 {
1790         u8 loc;
1791         u16 size;
1792
1793         loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
1794         if (!loc) {
1795                 rtw_err(rtwdev, "failed to get probe_req rsvd loc\n");
1796                 return;
1797         }
1798
1799         size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid);
1800         if (!size) {
1801                 rtw_err(rtwdev, "failed to get probe_req rsvd size\n");
1802                 return;
1803         }
1804
1805         __rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc);
1806 }
1807
/* Enable or disable the firmware channel-switch engine used for net-detect
 * (PNO) while in wakeup-on-wireless. The channel list itself was placed in
 * a reserved page earlier (RSVD_CH_INFO); this H2C only points the firmware
 * at it and programs the switching cadence from @cs_option.
 */
void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_pno_request *rtw_pno_req = &rtwdev->wow.pno_req;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN;
	u8 loc_ch_info;
	/* fixed switching policy; values are firmware-defined knobs */
	const struct rtw_ch_switch_option cs_option = {
		.dest_ch_en = 1,
		.dest_ch = 1,
		.periodic_option = 2,
		.normal_period = 5,
		.normal_period_sel = 0,
		.normal_cycle = 10,
		.slow_period = 1,
		.slow_period_sel = 1,
	};

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	CH_SWITCH_SET_START(h2c_pkt, enable);
	CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en);
	CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch);
	CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period);
	CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel);
	CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period);
	CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel);
	CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle);
	CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option);

	/* each channel entry in the rsvd page is 4 bytes */
	CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt);
	CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4);

	loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO);
	CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
1846
/* Push the current EDCCA (adaptivity) parameters to the firmware. When
 * EDCCA has been disabled via the rtw_edcca_enabled knob, force the mode
 * back to RTW_EDCCA_NORMAL before sending.
 */
void rtw_fw_adaptivity(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	if (!rtw_edcca_enabled) {
		dm_info->edcca_mode = RTW_EDCCA_NORMAL;
		rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
			"EDCCA disabled by debugfs\n");
	}

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY);
	SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode);
	SET_ADAPTIVITY_OPTION(h2c_pkt, 1);
	SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]);
	SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini);
	SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
1867
/* Inform the firmware that a scan is starting (@start = true) or has
 * finished (@start = false) via the H2C scan command.
 */
void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SCAN);
	SET_SCAN_START(h2c_pkt, start);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
1877
1878 static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
1879                                    struct sk_buff_head *list, u8 *bands,
1880                                    struct rtw_vif *rtwvif)
1881 {
1882         const struct rtw_chip_info *chip = rtwdev->chip;
1883         struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
1884         struct sk_buff *new;
1885         u8 idx;
1886
1887         for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
1888                 if (!(BIT(idx) & chip->band))
1889                         continue;
1890                 new = skb_copy(skb, GFP_KERNEL);
1891                 if (!new)
1892                         return -ENOMEM;
1893                 skb_put_data(new, ies->ies[idx], ies->len[idx]);
1894                 skb_put_data(new, ies->common_ies, ies->common_ie_len);
1895                 skb_queue_tail(list, new);
1896                 (*bands)++;
1897         }
1898
1899         return 0;
1900 }
1901
/* Stage the probe request frames for a HW scan and download them to the
 * reserved page area at fifo.rsvd_h2c_info_addr.
 *
 * Buffer layout: one dummy page is reserved at the start (page_offset
 * begins at 1) for the TX descriptor of the rsvd page download itself;
 * each probe request then occupies @page_cnt pages. For every frame,
 * __rtw_fw_update_pkt() tells the firmware where it lives.
 *
 * Consumes all skbs on @probe_req_list, freeing them on both the success
 * and the error path.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL if a frame
 * does not fit in its page slot, or the rsvd page download error.
 */
static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
					 struct sk_buff_head *probe_req_list)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb, *tmp;
	u8 page_offset = 1, *buf, page_size = chip->page_size;
	u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
	u16 buf_offset = page_size * page_offset;
	u8 tx_desc_sz = chip->tx_pkt_desc_sz;
	u8 page_cnt, pages;
	unsigned int pkt_len;
	int ret;

	/* older firmware uses a different per-probe page budget */
	if (rtw_fw_feature_ext_check(&rtwdev->fw, FW_FEATURE_EXT_OLD_PAGE_NUM))
		page_cnt = RTW_OLD_PROBE_PG_CNT;
	else
		page_cnt = RTW_PROBE_PG_CNT;

	pages = page_offset + num_probes * page_cnt;

	buf = kzalloc(page_size * pages, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* the first frame's TX descriptor overlaps the end of the dummy page */
	buf_offset -= tx_desc_sz;
	skb_queue_walk_safe(probe_req_list, skb, tmp) {
		skb_unlink(skb, probe_req_list);
		rtw_fill_rsvd_page_desc(rtwdev, skb, RSVD_PROBE_REQ);
		if (skb->len > page_size * page_cnt) {
			ret = -EINVAL;
			goto out;
		}

		memcpy(buf + buf_offset, skb->data, skb->len);
		pkt_len = skb->len - tx_desc_sz;
		/* firmware location is page-relative to the rsvd boundary */
		loc = pg_addr - rtwdev->fifo.rsvd_boundary + page_offset;
		__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, pkt_len, loc);

		buf_offset += page_cnt * page_size;
		page_offset += page_cnt;
		kfree_skb(skb);
	}

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, buf_offset);
	if (ret) {
		rtw_err(rtwdev, "Download probe request to firmware failed\n");
		goto out;
	}

	rtwdev->scan_info.probe_pg_size = page_offset;
out:
	kfree(buf);
	/* release whatever is still queued after an early exit */
	skb_queue_walk_safe(probe_req_list, skb, tmp)
		kfree_skb(skb);

	return ret;
}
1959
1960 static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
1961                                         struct rtw_vif *rtwvif)
1962 {
1963         struct cfg80211_scan_request *req = rtwvif->scan_req;
1964         struct sk_buff_head list;
1965         struct sk_buff *skb, *tmp;
1966         u8 num = req->n_ssids, i, bands = 0;
1967         int ret;
1968
1969         skb_queue_head_init(&list);
1970         for (i = 0; i < num; i++) {
1971                 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
1972                                              req->ssids[i].ssid,
1973                                              req->ssids[i].ssid_len,
1974                                              req->ie_len);
1975                 if (!skb) {
1976                         ret = -ENOMEM;
1977                         goto out;
1978                 }
1979                 ret = rtw_append_probe_req_ie(rtwdev, skb, &list, &bands,
1980                                               rtwvif);
1981                 if (ret)
1982                         goto out;
1983
1984                 kfree_skb(skb);
1985         }
1986
1987         return _rtw_hw_scan_update_probe_req(rtwdev, num * bands, &list);
1988
1989 out:
1990         skb_queue_walk_safe(&list, skb, tmp)
1991                 kfree_skb(skb);
1992
1993         return ret;
1994 }
1995
1996 static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info,
1997                              struct rtw_chan_list *list, u8 *buf)
1998 {
1999         u8 *chan = &buf[list->size];
2000         u8 info_size = RTW_CH_INFO_SIZE;
2001
2002         if (list->size > list->buf_size)
2003                 return -ENOMEM;
2004
2005         CH_INFO_SET_CH(chan, info->channel);
2006         CH_INFO_SET_PRI_CH_IDX(chan, info->pri_ch_idx);
2007         CH_INFO_SET_BW(chan, info->bw);
2008         CH_INFO_SET_TIMEOUT(chan, info->timeout);
2009         CH_INFO_SET_ACTION_ID(chan, info->action_id);
2010         CH_INFO_SET_EXTRA_INFO(chan, info->extra_info);
2011         if (info->extra_info) {
2012                 EXTRA_CH_INFO_SET_ID(chan, RTW_SCAN_EXTRA_ID_DFS);
2013                 EXTRA_CH_INFO_SET_INFO(chan, RTW_SCAN_EXTRA_ACTION_SCAN);
2014                 EXTRA_CH_INFO_SET_SIZE(chan, RTW_EX_CH_INFO_SIZE -
2015                                        RTW_EX_CH_INFO_HDR_SIZE);
2016                 EXTRA_CH_INFO_SET_DFS_EXT_TIME(chan, RTW_DFS_CHAN_TIME);
2017                 info_size += RTW_EX_CH_INFO_SIZE;
2018         }
2019         list->size += info_size;
2020         list->ch_num++;
2021
2022         return 0;
2023 }
2024
/* Serialize every channel of the scan request into @buf via
 * rtw_add_chan_info() and download the resulting list to the reserved
 * page area, right after the probe request pages.
 *
 * Return: 0 on success or a negative errno from entry encoding, size
 * validation, or the rsvd page download.
 */
static int rtw_add_chan_list(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
			     struct rtw_chan_list *list, u8 *buf)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct ieee80211_channel *channel;
	int i, ret = 0;

	for (i = 0; i < req->n_channels; i++) {
		struct rtw_chan_info ch_info = {0};

		channel = req->channels[i];
		ch_info.channel = channel->hw_value;
		ch_info.bw = RTW_SCAN_WIDTH;
		ch_info.pri_ch_idx = RTW_PRI_CH_IDX;
		ch_info.timeout = req->duration_mandatory ?
				  req->duration : RTW_CHANNEL_TIME;

		/* radar / no-IR channels are scanned passively with a
		 * (possibly longer) passive dwell time
		 */
		if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) {
			ch_info.action_id = RTW_CHANNEL_RADAR;
			ch_info.extra_info = 1;
			/* Overwrite duration for passive scans if necessary */
			ch_info.timeout = ch_info.timeout > RTW_PASS_CHAN_TIME ?
					  ch_info.timeout : RTW_PASS_CHAN_TIME;
		} else {
			ch_info.action_id = RTW_CHANNEL_ACTIVE;
		}

		ret = rtw_add_chan_info(rtwdev, &ch_info, list, buf);
		if (ret)
			return ret;
	}

	/* the encoded list must fit in the reserved page budget */
	if (list->size > fifo->rsvd_pg_num << TX_PAGE_SIZE_SHIFT) {
		rtw_err(rtwdev, "List exceeds rsvd page total size\n");
		return -EINVAL;
	}

	/* place the channel list directly after the probe request pages */
	list->addr = fifo->rsvd_h2c_info_addr + rtwdev->scan_info.probe_pg_size;
	ret = rtw_fw_write_data_rsvd_page(rtwdev, list->addr, buf, list->size);
	if (ret)
		rtw_err(rtwdev, "Download channel list failed\n");

	return ret;
}
2070
/* Build and send the H2C packet that starts or stops the firmware scan
 * offload, pointing the firmware at the downloaded channel list and probe
 * requests and programming the operating-channel back-op parameters.
 */
static void rtw_fw_set_scan_offload(struct rtw_dev *rtwdev,
				    struct rtw_ch_switch_option *opt,
				    struct rtw_vif *rtwvif,
				    struct rtw_chan_list *list)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	/* reserve one dummy page at the beginning for tx descriptor */
	u8 pkt_loc = fifo->rsvd_h2c_info_addr - fifo->rsvd_boundary + 1;
	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_SCAN_OFFLOAD);
	/* NOTE(review): unlike the other H2C packets in this file, the total
	 * length here omits H2C_PKT_HDR_SIZE — presumably intentional for
	 * this command; confirm against the firmware interface definition.
	 */
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, H2C_PKT_CH_SWITCH_LEN);

	SCAN_OFFLOAD_SET_START(h2c_pkt, opt->switch_en);
	SCAN_OFFLOAD_SET_BACK_OP_EN(h2c_pkt, opt->back_op_en);
	SCAN_OFFLOAD_SET_RANDOM_SEQ_EN(h2c_pkt, random_seq);
	SCAN_OFFLOAD_SET_NO_CCK_EN(h2c_pkt, req->no_cck);
	SCAN_OFFLOAD_SET_CH_NUM(h2c_pkt, list->ch_num);
	SCAN_OFFLOAD_SET_CH_INFO_SIZE(h2c_pkt, list->size);
	/* locations given to firmware are relative to the rsvd boundary */
	SCAN_OFFLOAD_SET_CH_INFO_LOC(h2c_pkt, list->addr - fifo->rsvd_boundary);
	SCAN_OFFLOAD_SET_OP_CH(h2c_pkt, scan_info->op_chan);
	SCAN_OFFLOAD_SET_OP_PRI_CH_IDX(h2c_pkt, scan_info->op_pri_ch_idx);
	SCAN_OFFLOAD_SET_OP_BW(h2c_pkt, scan_info->op_bw);
	SCAN_OFFLOAD_SET_OP_PORT_ID(h2c_pkt, rtwvif->port);
	SCAN_OFFLOAD_SET_OP_DWELL_TIME(h2c_pkt, req->duration_mandatory ?
				       req->duration : RTW_CHANNEL_TIME);
	SCAN_OFFLOAD_SET_OP_GAP_TIME(h2c_pkt, RTW_OFF_CHAN_TIME);
	SCAN_OFFLOAD_SET_SSID_NUM(h2c_pkt, req->n_ssids);
	SCAN_OFFLOAD_SET_PKT_LOC(h2c_pkt, pkt_loc);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
2106
/* Prepare driver and hardware for a firmware-offloaded scan: record the
 * scan request on the vif, quiesce and flush TX, pick the (optionally
 * randomized) source MAC address, and clear BIT_CBSSID_BCN in RCR so
 * beacons are not filtered by BSSID while scanning.
 */
void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
		       struct ieee80211_scan_request *scan_req)
{
	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
	struct cfg80211_scan_request *req = &scan_req->req;
	u8 mac_addr[ETH_ALEN];

	rtwdev->scan_info.scanning_vif = vif;
	rtwvif->scan_ies = &scan_req->ies;
	rtwvif->scan_req = req;

	ieee80211_stop_queues(rtwdev->hw);
	rtw_leave_lps_deep(rtwdev);
	rtw_hci_flush_all_queues(rtwdev, false);
	rtw_mac_flush_all_queues(rtwdev, false);
	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
		get_random_mask_addr(mac_addr, req->mac_addr,
				     req->mac_addr_mask);
	else
		ether_addr_copy(mac_addr, vif->addr);

	rtw_core_scan_start(rtwdev, rtwvif, mac_addr, true);

	rtwdev->hal.rcr &= ~BIT_CBSSID_BCN;
	rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
}
2133
/* Tear down after a firmware-offloaded scan: restore BSSID beacon
 * filtering, restore the operating channel if one was backed up, reapply
 * TX power for the current channel, wake the queues, and report
 * completion (with @aborted status) to mac80211.
 */
void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
			  bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_vif *rtwvif;
	u8 chan = scan_info->op_chan;

	/* nothing to clean up when no scan was tracked */
	if (!vif)
		return;

	rtwdev->hal.rcr |= BIT_CBSSID_BCN;
	rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);

	rtw_core_scan_complete(rtwdev, vif, true);

	rtwvif = (struct rtw_vif *)vif->drv_priv;
	/* op_chan != 0 means operating-channel state was backed up */
	if (chan)
		rtw_store_op_chan(rtwdev, false);
	rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
	ieee80211_wake_queues(rtwdev->hw);
	ieee80211_scan_completed(rtwdev->hw, &info);

	rtwvif->scan_req = NULL;
	rtwvif->scan_ies = NULL;
	rtwdev->scan_info.scanning_vif = NULL;
}
2164
2165 static int rtw_hw_scan_prehandle(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
2166                                  struct rtw_chan_list *list)
2167 {
2168         struct cfg80211_scan_request *req = rtwvif->scan_req;
2169         int size = req->n_channels * (RTW_CH_INFO_SIZE + RTW_EX_CH_INFO_SIZE);
2170         u8 *buf;
2171         int ret;
2172
2173         buf = kmalloc(size, GFP_KERNEL);
2174         if (!buf)
2175                 return -ENOMEM;
2176
2177         ret = rtw_hw_scan_update_probe_req(rtwdev, rtwvif);
2178         if (ret) {
2179                 rtw_err(rtwdev, "Update probe request failed\n");
2180                 goto out;
2181         }
2182
2183         list->buf_size = size;
2184         list->size = 0;
2185         list->ch_num = 0;
2186         ret = rtw_add_chan_list(rtwdev, rtwvif, list, buf);
2187 out:
2188         kfree(buf);
2189
2190         return ret;
2191 }
2192
/* Start (@enable = true) or stop the firmware scan offload on @vif. When
 * enabling, the probe requests and channel list are downloaded first.
 * If an AP is active, the beacon is re-downloaded afterwards —
 * presumably because the rsvd page content was just rewritten by the
 * scan downloads.
 *
 * Return: 0 on success or a negative errno.
 */
int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
			bool enable)
{
	struct rtw_vif *rtwvif = vif ? (struct rtw_vif *)vif->drv_priv : NULL;
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw_ch_switch_option cs_option = {0};
	struct rtw_chan_list chan_list = {0};
	int ret = 0;

	if (!rtwvif)
		return -EINVAL;

	cs_option.switch_en = enable;
	/* back-op only makes sense when an operating channel is backed up */
	cs_option.back_op_en = scan_info->op_chan != 0;
	if (enable) {
		ret = rtw_hw_scan_prehandle(rtwdev, rtwvif, &chan_list);
		if (ret)
			goto out;
	}
	rtw_fw_set_scan_offload(rtwdev, &cs_option, rtwvif, &chan_list);
out:
	if (rtwdev->ap_active) {
		ret = rtw_download_beacon(rtwdev);
		if (ret)
			rtw_err(rtwdev, "HW scan download beacon failed\n");
	}

	return ret;
}
2222
2223 void rtw_hw_scan_abort(struct rtw_dev *rtwdev)
2224 {
2225         struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
2226
2227         if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
2228                 return;
2229
2230         rtw_hw_scan_offload(rtwdev, vif, false);
2231         rtw_hw_scan_complete(rtwdev, vif, true);
2232 }
2233
2234 void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
2235 {
2236         struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
2237         struct rtw_c2h_cmd *c2h;
2238         bool aborted;
2239         u8 rc;
2240
2241         if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
2242                 return;
2243
2244         c2h = get_c2h_from_skb(skb);
2245         rc = GET_SCAN_REPORT_RETURN_CODE(c2h->payload);
2246         aborted = rc != RTW_SCAN_REPORT_SUCCESS;
2247         rtw_hw_scan_complete(rtwdev, vif, aborted);
2248
2249         if (aborted)
2250                 rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "HW scan aborted with code: %d\n", rc);
2251 }
2252
2253 void rtw_store_op_chan(struct rtw_dev *rtwdev, bool backup)
2254 {
2255         struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
2256         struct rtw_hal *hal = &rtwdev->hal;
2257         u8 band;
2258
2259         if (backup) {
2260                 scan_info->op_chan = hal->current_channel;
2261                 scan_info->op_bw = hal->current_band_width;
2262                 scan_info->op_pri_ch_idx = hal->current_primary_channel_index;
2263                 scan_info->op_pri_ch = hal->primary_channel;
2264         } else {
2265                 band = scan_info->op_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
2266                 rtw_update_channel(rtwdev, scan_info->op_chan,
2267                                    scan_info->op_pri_ch,
2268                                    band, scan_info->op_bw);
2269         }
2270 }
2271
2272 void rtw_clear_op_chan(struct rtw_dev *rtwdev)
2273 {
2274         struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
2275
2276         scan_info->op_chan = 0;
2277         scan_info->op_bw = 0;
2278         scan_info->op_pri_ch_idx = 0;
2279         scan_info->op_pri_ch = 0;
2280 }
2281
2282 static bool rtw_is_op_chan(struct rtw_dev *rtwdev, u8 channel)
2283 {
2284         struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
2285
2286         return channel == scan_info->op_chan;
2287 }
2288
/* C2H handler for firmware channel-switch notifications during a HW scan.
 *
 * POSTSWITCH: the hardware has landed on @chan — update driver channel
 * state; if it is the operating channel, restore op-channel parameters,
 * wake the queues and re-enable beaconing.
 *
 * PRESWITCH: @chan is the channel the hardware is about to switch to —
 * notify coex of the band, and stop queues / beaconing when leaving the
 * operating channel for a non-op channel.
 */
void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_c2h_cmd *c2h;
	enum rtw_scan_notify_id id;
	u8 chan, band, status;

	/* ignore stray notifications when no scan is in progress */
	if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
		return;

	c2h = get_c2h_from_skb(skb);
	chan = GET_CHAN_SWITCH_CENTRAL_CH(c2h->payload);
	id = GET_CHAN_SWITCH_ID(c2h->payload);
	status = GET_CHAN_SWITCH_STATUS(c2h->payload);

	if (id == RTW_SCAN_NOTIFY_ID_POSTSWITCH) {
		band = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
		rtw_update_channel(rtwdev, chan, chan, band,
				   RTW_CHANNEL_WIDTH_20);
		if (rtw_is_op_chan(rtwdev, chan)) {
			rtw_store_op_chan(rtwdev, false);
			ieee80211_wake_queues(rtwdev->hw);
			rtw_core_enable_beacon(rtwdev, true);
		}
	} else if (id == RTW_SCAN_NOTIFY_ID_PRESWITCH) {
		if (IS_CH_5G_BAND(chan)) {
			rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
		} else if (IS_CH_2G_BAND(chan)) {
			u8 chan_type;

			if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
				chan_type = COEX_SWITCH_TO_24G;
			else
				chan_type = COEX_SWITCH_TO_24G_NOFORSCAN;
			rtw_coex_switchband_notify(rtwdev, chan_type);
		}
		/* The channel of C2H RTW_SCAN_NOTIFY_ID_PRESWITCH is next
		 * channel that hardware will switch. We need to stop queue
		 * if next channel is non-op channel.
		 */
		if (!rtw_is_op_chan(rtwdev, chan) &&
		    rtw_is_op_chan(rtwdev, hal->current_channel)) {
			rtw_core_enable_beacon(rtwdev, false);
			ieee80211_stop_queues(rtwdev->hw);
		}
	}

	rtw_dbg(rtwdev, RTW_DBG_HW_SCAN,
		"Chan switch: %x, id: %x, status: %x\n", chan, id, status);
}