// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021-2022 Intel Corporation
 */
6 #include <linux/etherdevice.h>
7 #include <linux/netdevice.h>
8 #include <linux/ieee80211.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/mei_cl_bus.h>
13 #include <linux/rcupdate.h>
14 #include <linux/debugfs.h>
15 #include <linux/skbuff.h>
16 #include <linux/wait.h>
17 #include <linux/slab.h>
20 #include <net/cfg80211.h>
25 #include "trace-data.h"
28 MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface");
29 MODULE_LICENSE("GPL");
31 #define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \
32 0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65)
/*
 * Since iwlwifi calls iwlmei without any context, hold a pointer to the
 * mei_cl_device structure here.
 * Define a mutex that will synchronize all the flows between iwlwifi and
 * iwlmei.
 * Note that iwlmei can't have several instances, so it ok to have static
 * variables here.
 */
42 static struct mei_cl_device *iwl_mei_global_cldev;
43 static DEFINE_MUTEX(iwl_mei_mutex);
44 static unsigned long iwl_mei_status;
/*
 * enum iwl_mei_status_bits - bits for the global iwl_mei_status bitmap
 * @IWL_MEI_STATUS_SAP_CONNECTED: the SAP protocol handshake with the CSME
 *	firmware completed (set upon SAP_ME_MSG_START_OK).
 */
enum iwl_mei_status_bits {
	IWL_MEI_STATUS_SAP_CONNECTED,
};
50 bool iwl_mei_is_connected(void)
52 return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
54 EXPORT_SYMBOL_GPL(iwl_mei_is_connected);
57 #define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */
59 struct iwl_sap_q_ctrl_blk {
66 SAP_QUEUE_IDX_NOTIF = 0,
73 struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX];
76 enum iwl_sap_dir_idx {
77 SAP_DIRECTION_HOST_TO_ME = 0,
78 SAP_DIRECTION_ME_TO_HOST,
82 struct iwl_sap_shared_mem_ctrl_blk {
85 struct iwl_sap_dir dir[SAP_DIRECTION_MAX];
/*
 * The shared area has the following layout:
 *
 * +-----------------------------------+
 * |struct iwl_sap_shared_mem_ctrl_blk |
 * +-----------------------------------+
 * |Host -> ME data queue              |
 * +-----------------------------------+
 * |Host -> ME notif queue             |
 * +-----------------------------------+
 * |ME -> Host data queue              |
 * +-----------------------------------+
 * |ME -> host notif queue             |
 * +-----------------------------------+
 * |SAP control block id (SAP!)        |
 * +-----------------------------------+
 */
106 #define SAP_H2M_DATA_Q_SZ 48256
107 #define SAP_M2H_DATA_Q_SZ 24128
108 #define SAP_H2M_NOTIF_Q_SZ 2240
109 #define SAP_M2H_NOTIF_Q_SZ 62720
111 #define _IWL_MEI_SAP_SHARED_MEM_SZ \
112 (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
113 SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \
114 SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)
116 #define IWL_MEI_SAP_SHARED_MEM_SZ \
117 (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE))
119 struct iwl_mei_shared_mem_ptrs {
120 struct iwl_sap_shared_mem_ctrl_blk *ctrl;
121 void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
122 size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
125 struct iwl_mei_filters {
126 struct rcu_head rcu_head;
127 struct iwl_sap_oob_filters filters;
131 * struct iwl_mei - holds the private date for iwl_mei
133 * @get_nvm_wq: the wait queue for the get_nvm flow
134 * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA
135 * message. Used so that we can send CHECK_SHARED_AREA from atomic
137 * @get_ownership_wq: the wait queue for the get_ownership_flow
138 * @shared_mem: the memory that is shared between CSME and the host
139 * @cldev: the pointer to the MEI client device
140 * @nvm: the data returned by the CSME for the NVM
141 * @filters: the filters sent by CSME
142 * @got_ownership: true if we own the device
143 * @amt_enabled: true if CSME has wireless enabled
144 * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI
145 * bus, but rather need to wait until send_csa_msg_wk runs
146 * @csme_taking_ownership: true when CSME is taking ownership. Used to remember
147 * to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
149 * @link_prot_state: true when we are in link protection PASSIVE
150 * @csa_throttle_end_wk: used when &csa_throttled is true
151 * @data_q_lock: protects the access to the data queues which are
152 * accessed without the mutex.
153 * @sap_seq_no: the sequence number for the SAP messages
154 * @seq_no: the sequence number for the SAP messages
155 * @dbgfs_dir: the debugfs dir entry
158 wait_queue_head_t get_nvm_wq;
159 struct work_struct send_csa_msg_wk;
160 wait_queue_head_t get_ownership_wq;
161 struct iwl_mei_shared_mem_ptrs shared_mem;
162 struct mei_cl_device *cldev;
163 struct iwl_mei_nvm *nvm;
164 struct iwl_mei_filters __rcu *filters;
168 bool csme_taking_ownership;
169 bool link_prot_state;
170 struct delayed_work csa_throttle_end_wk;
171 spinlock_t data_q_lock;
176 struct dentry *dbgfs_dir;
180 * struct iwl_mei_cache - cache for the parameters from iwlwifi
181 * @ops: Callbacks to iwlwifi.
182 * @netdev: The netdev that will be used to transmit / receive packets.
183 * @conn_info: The connection info message triggered by iwlwifi's association.
184 * @power_limit: pointer to an array of 10 elements (le16) represents the power
185 * restrictions per chain.
186 * @rf_kill: rf kill state.
188 * @mac_address: interface MAC address.
189 * @nvm_address: NVM MAC address.
190 * @priv: A pointer to iwlwifi.
192 * This used to cache the configurations coming from iwlwifi's way. The data
193 * is cached here so that we can buffer the configuration even if we don't have
194 * a bind from the mei bus and hence, on iwl_mei structure.
196 struct iwl_mei_cache {
197 const struct iwl_mei_ops *ops;
198 struct net_device __rcu *netdev;
199 const struct iwl_sap_notif_connection_info *conn_info;
200 const __le16 *power_limit;
208 static struct iwl_mei_cache iwl_mei_cache = {
209 .rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED
212 static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev)
214 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
216 if (mei_cldev_dma_unmap(cldev))
217 dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n");
218 memset(&mei->shared_mem, 0, sizeof(mei->shared_mem));
221 #define HBM_DMA_BUF_ID_WLAN 1
223 static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
225 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
226 struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
228 mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN,
229 IWL_MEI_SAP_SHARED_MEM_SZ);
231 if (IS_ERR(mem->ctrl)) {
232 int ret = PTR_ERR(mem->ctrl);
239 memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ);
244 static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
246 struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
247 struct iwl_sap_dir *h2m;
248 struct iwl_sap_dir *m2h;
252 mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
254 mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl));
256 h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
257 m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
259 h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
260 cpu_to_le32(SAP_H2M_DATA_Q_SZ);
261 h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
262 cpu_to_le32(SAP_H2M_NOTIF_Q_SZ);
263 m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
264 cpu_to_le32(SAP_M2H_DATA_Q_SZ);
265 m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
266 cpu_to_le32(SAP_M2H_NOTIF_Q_SZ);
268 /* q_head points to the start of the first queue */
269 q_head = (void *)(mem->ctrl + 1);
271 /* Initialize the queue heads */
272 for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) {
273 for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) {
274 mem->q_head[dir][queue] = q_head;
276 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
277 mem->q_size[dir][queue] =
278 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
282 *(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
285 static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
286 struct iwl_sap_q_ctrl_blk *notif_q,
288 const struct iwl_sap_hdr *hdr,
291 u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
292 u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
294 size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len);
296 if (rd > q_sz || wr > q_sz) {
298 "Pointers are past the end of the buffer\n");
302 room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;
304 /* we don't have enough room for the data to write */
305 if (room_in_buf < tx_sz) {
307 "Not enough room in the buffer\n");
311 if (wr + tx_sz <= q_sz) {
312 memcpy(q_head + wr, hdr, tx_sz);
314 memcpy(q_head + wr, hdr, q_sz - wr);
315 memcpy(q_head, (u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
318 WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
322 static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei)
324 struct iwl_sap_q_ctrl_blk *notif_q;
325 struct iwl_sap_dir *dir;
327 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
328 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
330 if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr))
333 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
334 return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr);
337 static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev)
339 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
340 struct iwl_sap_me_msg_start msg = {
341 .hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA),
342 .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
346 lockdep_assert_held(&iwl_mei_mutex);
348 if (mei->csa_throttled)
351 trace_iwlmei_me_msg(&msg.hdr, true);
352 ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
353 if (ret != sizeof(msg)) {
355 "failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n",
360 mei->csa_throttled = true;
362 schedule_delayed_work(&mei->csa_throttle_end_wk,
363 msecs_to_jiffies(100));
368 static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk)
370 struct iwl_mei *mei =
371 container_of(wk, struct iwl_mei, csa_throttle_end_wk.work);
373 mutex_lock(&iwl_mei_mutex);
375 mei->csa_throttled = false;
377 if (iwl_mei_host_to_me_data_pending(mei))
378 iwl_mei_send_check_shared_area(mei->cldev);
380 mutex_unlock(&iwl_mei_mutex);
383 static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev,
384 struct iwl_sap_hdr *hdr)
386 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
387 struct iwl_sap_q_ctrl_blk *notif_q;
388 struct iwl_sap_dir *dir;
393 lockdep_assert_held(&iwl_mei_mutex);
395 if (!mei->shared_mem.ctrl) {
397 "No shared memory, can't send any SAP message\n");
401 if (!iwl_mei_is_connected()) {
403 "Can't send a SAP message if we're not connected\n");
407 hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
408 dev_dbg(&cldev->dev, "Sending %d\n", hdr->type);
410 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
411 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
412 q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
413 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
414 ret = iwl_mei_write_cyclic_buf(q_head, notif_q, q_head, hdr, q_sz);
419 trace_iwlmei_sap_cmd(hdr, true);
421 return iwl_mei_send_check_shared_area(cldev);
424 void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
426 struct iwl_sap_q_ctrl_blk *notif_q;
427 struct iwl_sap_dir *dir;
437 if (!iwl_mei_global_cldev)
440 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
443 * We access this path for Rx packets (the more common case)
444 * and from Tx path when we send DHCP packets, the latter is
446 * Take the lock already here to make sure we see that remove()
447 * might have cleared the IWL_MEI_STATUS_SAP_CONNECTED bit.
449 spin_lock_bh(&mei->data_q_lock);
451 if (!iwl_mei_is_connected()) {
452 spin_unlock_bh(&mei->data_q_lock);
457 * We are in a RCU critical section and the remove from the CSME bus
458 * which would free this memory waits for the readers to complete (this
459 * is done in netdev_rx_handler_unregister).
461 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
462 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
463 q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
464 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
466 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
467 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
468 hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) :
469 sizeof(struct iwl_sap_hdr);
470 tx_sz = skb->len + hdr_sz;
472 if (rd > q_sz || wr > q_sz) {
473 dev_err(&mei->cldev->dev,
474 "can't write the data: pointers are past the end of the buffer\n");
478 room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;
480 /* we don't have enough room for the data to write */
481 if (room_in_buf < tx_sz) {
482 dev_err(&mei->cldev->dev,
483 "Not enough room in the buffer for this data\n");
487 if (skb_headroom(skb) < hdr_sz) {
488 dev_err(&mei->cldev->dev,
489 "Not enough headroom in the skb to write the SAP header\n");
494 struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr));
496 cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET);
497 cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr));
498 cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
499 cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX));
500 cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr));
501 trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP);
503 struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr));
505 hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET);
506 hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
507 hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
508 trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR);
511 if (wr + tx_sz <= q_sz) {
512 skb_copy_bits(skb, 0, q_head + wr, tx_sz);
514 skb_copy_bits(skb, 0, q_head + wr, q_sz - wr);
515 skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr));
518 WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
521 spin_unlock_bh(&mei->data_q_lock);
525 iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type)
527 struct iwl_sap_hdr msg = {
528 .type = cpu_to_le16(type),
531 return iwl_mei_send_sap_msg_payload(cldev, &msg);
534 static void iwl_mei_send_csa_msg_wk(struct work_struct *wk)
536 struct iwl_mei *mei =
537 container_of(wk, struct iwl_mei, send_csa_msg_wk);
539 if (!iwl_mei_is_connected())
542 mutex_lock(&iwl_mei_mutex);
544 iwl_mei_send_check_shared_area(mei->cldev);
546 mutex_unlock(&iwl_mei_mutex);
549 /* Called in a RCU read critical section from netif_receive_skb */
550 static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
552 struct sk_buff *skb = *pskb;
553 struct iwl_mei *mei =
554 rcu_dereference(skb->dev->rx_handler_data);
555 struct iwl_mei_filters *filters = rcu_dereference(mei->filters);
556 bool rx_for_csme = false;
557 rx_handler_result_t res;
560 * remove() unregisters this handler and synchronize_net, so this
561 * should never happen.
563 if (!iwl_mei_is_connected()) {
564 dev_err(&mei->cldev->dev,
565 "Got an Rx packet, but we're not connected to SAP?\n");
566 return RX_HANDLER_PASS;
570 res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme);
572 res = RX_HANDLER_PASS;
575 * The data is already on the ring of the shared area, all we
576 * need to do is to tell the CSME firmware to check what we have
580 schedule_work(&mei->send_csa_msg_wk);
582 if (res != RX_HANDLER_PASS) {
583 trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR);
591 iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
592 const struct iwl_sap_me_msg_start_ok *rsp,
595 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
597 if (len != sizeof(*rsp)) {
599 "got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
601 "size is incorrect: %zd instead of %zu\n",
606 if (rsp->supported_version != SAP_VERSION) {
608 "didn't get the expected version: got %d\n",
609 rsp->supported_version);
613 mutex_lock(&iwl_mei_mutex);
614 set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
615 /* wifi driver has registered already */
616 if (iwl_mei_cache.ops) {
617 iwl_mei_send_sap_msg(mei->cldev,
618 SAP_MSG_NOTIF_WIFIDR_UP);
619 iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
622 mutex_unlock(&iwl_mei_mutex);
625 static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev,
626 const struct iwl_sap_csme_filters *filters)
628 struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
629 struct iwl_mei_filters *new_filters;
630 struct iwl_mei_filters *old_filters;
633 rcu_dereference_protected(mei->filters,
634 lockdep_is_held(&iwl_mei_mutex));
636 new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL);
640 /* Copy the OOB filters */
641 new_filters->filters = filters->filters;
643 rcu_assign_pointer(mei->filters, new_filters);
646 kfree_rcu(old_filters, rcu_head);
650 iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
651 const struct iwl_sap_notif_conn_status *status)
653 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
654 struct iwl_mei_conn_info conn_info = {
655 .lp_state = le32_to_cpu(status->link_prot_state),
656 .ssid_len = le32_to_cpu(status->conn_info.ssid_len),
657 .channel = status->conn_info.channel,
658 .band = status->conn_info.band,
659 .auth_mode = le32_to_cpu(status->conn_info.auth_mode),
660 .pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher),
663 if (!iwl_mei_cache.ops ||
664 conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid))
667 memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len);
668 ether_addr_copy(conn_info.bssid, status->conn_info.bssid);
670 iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
672 mei->link_prot_state = status->link_prot_state;
675 * Update the Rfkill state in case the host does not own the device:
676 * if we are in Link Protection, ask to not touch the device, else,
678 * If the host owns the device, inform the user space whether it can
681 if (mei->got_ownership)
682 iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv,
683 status->link_prot_state);
685 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv,
686 status->link_prot_state);
689 static void iwl_mei_set_init_conf(struct iwl_mei *mei)
691 struct iwl_sap_notif_host_link_up link_msg = {
692 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
693 .hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)),
695 struct iwl_sap_notif_country_code mcc_msg = {
696 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
697 .hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)),
698 .mcc = cpu_to_le16(iwl_mei_cache.mcc),
700 struct iwl_sap_notif_sar_limits sar_msg = {
701 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
702 .hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)),
704 struct iwl_sap_notif_host_nic_info nic_info_msg = {
705 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
706 .hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)),
708 struct iwl_sap_msg_dw rfkill_msg = {
709 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
710 .hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)),
711 .val = cpu_to_le32(iwl_mei_cache.rf_kill),
714 iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);
716 if (iwl_mei_cache.conn_info) {
717 link_msg.conn_info = *iwl_mei_cache.conn_info;
718 iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr);
721 iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr);
723 if (iwl_mei_cache.power_limit) {
724 memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit,
725 sizeof(sar_msg.sar_chain_info_table));
726 iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr);
729 ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address);
730 ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address);
731 iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr);
733 iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr);
736 static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
737 const struct iwl_sap_msg_dw *dw)
739 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
740 struct net_device *netdev;
743 * First take rtnl and only then the mutex to avoid an ABBA
744 * with iwl_mei_set_netdev()
747 mutex_lock(&iwl_mei_mutex);
749 netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
750 lockdep_is_held(&iwl_mei_mutex));
752 if (mei->amt_enabled == !!le32_to_cpu(dw->val))
755 mei->amt_enabled = dw->val;
757 if (mei->amt_enabled) {
759 netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
761 iwl_mei_set_init_conf(mei);
763 if (iwl_mei_cache.ops)
764 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
766 netdev_rx_handler_unregister(netdev);
770 mutex_unlock(&iwl_mei_mutex);
774 static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
775 const struct iwl_sap_msg_dw *dw)
777 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
779 mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME);
782 static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev,
785 /* We can get ownership and driver is registered, go ahead */
786 if (iwl_mei_cache.ops)
787 iwl_mei_send_sap_msg(cldev,
788 SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
791 static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
794 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
796 dev_info(&cldev->dev, "CSME takes ownership\n");
798 mei->got_ownership = false;
801 * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi driver
802 * is finished taking the device down.
804 mei->csme_taking_ownership = true;
806 if (iwl_mei_cache.ops)
807 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true);
810 static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
811 const struct iwl_sap_nvm *sap_nvm)
813 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
814 const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm;
818 mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL);
822 ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr);
823 mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs;
824 mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg);
825 mei->nvm->caps = le32_to_cpu(sap_nvm->caps);
826 mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version);
828 for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++)
829 mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]);
831 wake_up_all(&mei->get_nvm_wq);
834 static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev,
835 const struct iwl_sap_msg_dw *dw)
837 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
840 * This means that we can't use the wifi device right now, CSME is not
841 * ready to let us use it.
844 dev_info(&cldev->dev, "Ownership req denied\n");
848 mei->got_ownership = true;
849 wake_up_all(&mei->get_ownership_wq);
851 iwl_mei_send_sap_msg(cldev,
852 SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED);
854 /* We can now start the connection, unblock rfkill */
855 if (iwl_mei_cache.ops)
856 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
859 static void iwl_mei_handle_ping(struct mei_cl_device *cldev,
860 const struct iwl_sap_hdr *hdr)
862 iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG);
865 static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev,
866 const struct iwl_sap_hdr *hdr)
868 u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr);
869 u16 type = le16_to_cpu(hdr->type);
872 "Got a new SAP message: type %d, len %d, seq %d\n",
873 le16_to_cpu(hdr->type), len,
874 le32_to_cpu(hdr->seq_num));
876 #define SAP_MSG_HANDLER(_cmd, _handler, _sz) \
877 case SAP_MSG_NOTIF_ ## _cmd: \
879 dev_err(&cldev->dev, \
880 "Bad size for %d: %u < %u\n", \
881 le16_to_cpu(hdr->type), \
883 (unsigned int)_sz); \
886 mutex_lock(&iwl_mei_mutex); \
887 _handler(cldev, (const void *)hdr); \
888 mutex_unlock(&iwl_mei_mutex); \
891 #define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz) \
892 case SAP_MSG_NOTIF_ ## _cmd: \
894 dev_err(&cldev->dev, \
895 "Bad size for %d: %u < %u\n", \
896 le16_to_cpu(hdr->type), \
898 (unsigned int)_sz); \
901 _handler(cldev, (const void *)hdr); \
904 #define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz) \
905 case SAP_MSG_NOTIF_ ## _cmd: \
907 dev_err(&cldev->dev, \
908 "Bad size for %d: %u < %u\n", \
909 le16_to_cpu(hdr->type), \
911 (unsigned int)_sz); \
917 SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0);
918 SAP_MSG_HANDLER(CSME_FILTERS,
919 iwl_mei_handle_csme_filters,
920 sizeof(struct iwl_sap_csme_filters));
921 SAP_MSG_HANDLER(CSME_CONN_STATUS,
922 iwl_mei_handle_conn_status,
923 sizeof(struct iwl_sap_notif_conn_status));
924 SAP_MSG_HANDLER_NO_LOCK(AMT_STATE,
925 iwl_mei_handle_amt_state,
926 sizeof(struct iwl_sap_msg_dw));
927 SAP_MSG_HANDLER_NO_HANDLER(PONG, 0);
928 SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm,
929 sizeof(struct iwl_sap_nvm));
930 SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ,
931 iwl_mei_handle_rx_host_own_req,
932 sizeof(struct iwl_sap_msg_dw));
933 SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner,
934 sizeof(struct iwl_sap_msg_dw));
935 SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP,
936 iwl_mei_handle_can_release_ownership, 0);
937 SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP,
938 iwl_mei_handle_csme_taking_ownership, 0);
941 * This is not really an error, there are message that we decided
942 * to ignore, yet, it is useful to be able to leave a note if debug
945 dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n",
946 le16_to_cpu(hdr->type), len);
949 #undef SAP_MSG_HANDLER
950 #undef SAP_MSG_HANDLER_NO_LOCK
953 static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz,
960 if (rd + len <= q_sz) {
961 memcpy(buf, q_head + rd, len);
964 memcpy(buf, q_head + rd, q_sz - rd);
965 memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd));
966 rd = len - (q_sz - rd);
972 #define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) + \
973 IEEE80211_TKIP_IV_LEN + \
974 sizeof(rfc1042_header) + ETH_TLEN)
976 static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev,
977 const u8 *q_head, u32 q_sz,
978 u32 rd, u32 wr, ssize_t valid_rx_sz,
979 struct sk_buff_head *tx_skbs)
981 struct iwl_sap_hdr hdr;
982 struct net_device *netdev =
983 rcu_dereference_protected(iwl_mei_cache.netdev,
984 lockdep_is_held(&iwl_mei_mutex));
989 while (valid_rx_sz >= sizeof(hdr)) {
990 struct ethhdr *ethhdr;
995 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr));
996 valid_rx_sz -= sizeof(hdr);
997 len = le16_to_cpu(hdr.len);
999 if (valid_rx_sz < len) {
1000 dev_err(&cldev->dev,
1001 "Data queue is corrupted: valid data len %zd, len %d\n",
1006 if (len < sizeof(*ethhdr)) {
1007 dev_err(&cldev->dev,
1008 "Data len is smaller than an ethernet header? len = %d\n",
1014 if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) {
1015 dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n",
1016 le16_to_cpu(hdr.type), len);
1020 /* We need enough room for the WiFi header + SNAP + IV */
1021 skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN);
1023 skb_reserve(skb, QOS_HDR_IV_SNAP_LEN);
1024 ethhdr = skb_push(skb, sizeof(*ethhdr));
1026 iwl_mei_read_from_q(q_head, q_sz, &rd, wr,
1027 ethhdr, sizeof(*ethhdr));
1028 len -= sizeof(*ethhdr);
1030 skb_reset_mac_header(skb);
1031 skb_reset_network_header(skb);
1032 skb->protocol = ethhdr->h_proto;
1034 data = skb_put(skb, len);
1035 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len);
1038 * Enqueue the skb here so that it can be sent later when we
1039 * do not hold the mutex. TX'ing a packet with a mutex held is
1040 * possible, but it wouldn't be nice to forbid the TX path to
1041 * call any of iwlmei's functions, since every API from iwlmei
1044 __skb_queue_tail(tx_skbs, skb);
1048 static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev,
1049 const u8 *q_head, u32 q_sz,
1050 u32 rd, u32 wr, ssize_t valid_rx_sz)
1052 struct page *p = alloc_page(GFP_KERNEL);
1053 struct iwl_sap_hdr *hdr;
1058 hdr = page_address(p);
1060 while (valid_rx_sz >= sizeof(*hdr)) {
1063 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr));
1064 valid_rx_sz -= sizeof(*hdr);
1065 len = le16_to_cpu(hdr->len);
1067 if (valid_rx_sz < len)
1070 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len);
1072 trace_iwlmei_sap_cmd(hdr, false);
1073 iwl_mei_handle_sap_msg(cldev, hdr);
1077 /* valid_rx_sz must be 0 now... */
1079 dev_err(&cldev->dev,
1080 "More data in the buffer although we read it all\n");
1085 static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev,
1086 struct iwl_sap_q_ctrl_blk *notif_q,
1088 struct sk_buff_head *skbs,
1091 u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
1092 u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
1093 ssize_t valid_rx_sz;
1095 if (rd > q_sz || wr > q_sz) {
1096 dev_err(&cldev->dev,
1097 "Pointers are past the buffer limit\n");
1104 valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr;
1107 iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr,
1110 iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr,
1113 /* Increment the read pointer to point to the write pointer */
1114 WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr));
1117 static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev)
1119 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1120 struct iwl_sap_q_ctrl_blk *notif_q;
1121 struct sk_buff_head tx_skbs;
1122 struct iwl_sap_dir *dir;
1126 if (!mei->shared_mem.ctrl)
1129 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
1130 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
1131 q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
1132 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
1135 * Do not hold the mutex here, but rather each and every message
1137 * This allows message handlers to take it at a certain time.
1139 iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL, q_sz);
1141 mutex_lock(&iwl_mei_mutex);
1142 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
1143 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
1144 q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
1145 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
1147 __skb_queue_head_init(&tx_skbs);
1149 iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs, q_sz);
1151 if (skb_queue_empty(&tx_skbs)) {
1152 mutex_unlock(&iwl_mei_mutex);
1157 * Take the RCU read lock before we unlock the mutex to make sure that
1158 * even if the netdev is replaced by another non-NULL netdev right after
1159 * we unlock the mutex, the old netdev will still be valid when we
1160 * transmit the frames. We can't allow to replace the netdev here because
1161 * the skbs hold a pointer to the netdev.
1165 mutex_unlock(&iwl_mei_mutex);
1167 if (!rcu_access_pointer(iwl_mei_cache.netdev)) {
1168 dev_err(&cldev->dev, "Can't Tx without a netdev\n");
1169 skb_queue_purge(&tx_skbs);
1173 while (!skb_queue_empty(&tx_skbs)) {
1174 struct sk_buff *skb = __skb_dequeue(&tx_skbs);
1176 trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR);
1177 dev_queue_xmit(skb);
1184 static void iwl_mei_rx(struct mei_cl_device *cldev)
1186 struct iwl_sap_me_msg_hdr *hdr;
1190 ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg));
1192 dev_err(&cldev->dev, "failed to receive data: %zd\n", ret);
1197 dev_err(&cldev->dev, "got an empty response\n");
1202 trace_iwlmei_me_msg(hdr, false);
1204 switch (le32_to_cpu(hdr->type)) {
1205 case SAP_ME_MSG_START_OK:
1206 BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) >
1209 iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret);
1211 case SAP_ME_MSG_CHECK_SHARED_AREA:
1212 iwl_mei_handle_check_shared_area(cldev);
1215 dev_err(&cldev->dev, "got a RX notification: %d\n",
1216 le32_to_cpu(hdr->type));
/*
 * Send the SAP_ME_MSG_START message that initiates the SAP protocol
 * handshake with CSME; the SAP_ME_MSG_START_OK answer arrives later
 * through the Rx callback.
 * NOTE(review): visible callers (probe, debugfs) hold iwl_mei_mutex
 * around this call — presumably required; confirm.
 */
1221 static int iwl_mei_send_start(struct mei_cl_device *cldev)
1223 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1224 struct iwl_sap_me_msg_start msg = {
1225 .hdr.type = cpu_to_le32(SAP_ME_MSG_START),
/* Monotonically increasing sequence number, per device */
1226 .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
1227 .hdr.len = cpu_to_le32(sizeof(msg)),
1228 .supported_versions[0] = SAP_VERSION,
/* Initial sequence numbers for the data / notification queues */
1229 .init_data_seq_num = cpu_to_le16(0x100),
1230 .init_notif_seq_num = cpu_to_le16(0x800),
1234 trace_iwlmei_me_msg(&msg.hdr, true);
1235 ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
/* A short write counts as a failure */
1236 if (ret != sizeof(msg)) {
1237 dev_err(&cldev->dev,
1238 "failed to send the SAP_ME_MSG_START message %d\n",
/*
 * Enable the MEI client device and register iwl_mei_rx as its Rx
 * callback. If registering the callback fails, the device is disabled
 * again before returning the error.
 */
1246 static int iwl_mei_enable(struct mei_cl_device *cldev)
1250 ret = mei_cldev_enable(cldev);
1252 dev_err(&cldev->dev, "failed to enable the device: %d\n", ret);
1256 ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx);
1258 dev_err(&cldev->dev,
1259 "failed to register to the rx cb: %d\n", ret);
/* Undo the enable on failure */
1260 mei_cldev_disable(cldev);
/*
 * Request the NVM data from CSME and wait (up to 2 seconds) for the
 * answer, which is delivered asynchronously via the Rx path waking
 * get_nvm_wq. Returns a kmemdup'ed copy of the NVM data or NULL on
 * failure; presumably the caller owns and must kfree the returned
 * buffer — confirm against callers.
 */
1267 struct iwl_mei_nvm *iwl_mei_get_nvm(void)
1269 struct iwl_mei_nvm *nvm = NULL;
1270 struct iwl_mei *mei;
1273 mutex_lock(&iwl_mei_mutex);
/* Bail out if we never got (or already lost) the SAP connection */
1275 if (!iwl_mei_is_connected())
1278 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
/* Ask CSME for its NVM; the reply handler sets mei->nvm */
1283 ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev,
1284 SAP_MSG_NOTIF_GET_NVM);
/* Drop the mutex while sleeping so the Rx path can make progress */
1288 mutex_unlock(&iwl_mei_mutex);
1290 ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ);
1294 mutex_lock(&iwl_mei_mutex);
/* Re-check: the SAP connection may have gone away while we slept */
1296 if (!iwl_mei_is_connected())
1299 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1305 nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL);
1308 mutex_unlock(&iwl_mei_mutex);
1311 EXPORT_SYMBOL_GPL(iwl_mei_get_nvm);
/*
 * Ask CSME to hand NIC ownership over to the host and wait (up to
 * HZ / 2) for the confirmation, which the Rx path signals by setting
 * mei->got_ownership and waking get_ownership_wq.
 * Returns 0 when ownership was granted, non-zero otherwise.
 */
1313 int iwl_mei_get_ownership(void)
1315 struct iwl_mei *mei;
1318 mutex_lock(&iwl_mei_mutex);
1320 /* In case we didn't have a bind */
1321 if (!iwl_mei_is_connected()) {
1326 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
/* Without AMT, CSME never holds the NIC - nothing to request */
1333 if (!mei->amt_enabled) {
/* Already ours - no need to ask again */
1338 if (mei->got_ownership) {
1343 ret = iwl_mei_send_sap_msg(mei->cldev,
1344 SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
/* Drop the mutex while waiting so the Rx path can update state */
1348 mutex_unlock(&iwl_mei_mutex);
1350 ret = wait_event_timeout(mei->get_ownership_wq,
1351 mei->got_ownership, HZ / 2);
1355 mutex_lock(&iwl_mei_mutex);
1357 /* In case we didn't have a bind */
1358 if (!iwl_mei_is_connected()) {
1363 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
/* 0 on success: got_ownership was set by the Rx handler */
1370 ret = !mei->got_ownership;
1373 mutex_unlock(&iwl_mei_mutex);
1376 EXPORT_SYMBOL_GPL(iwl_mei_get_ownership);
/*
 * Notify CSME that the host wifi driver associated to an AP: build a
 * SAP_MSG_NOTIF_HOST_LINK_UP message from the connection info (and the
 * optional collocated-AP info), send it, and cache the connection info
 * so it can be replayed when a new SAP connection is established.
 */
1378 void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
1379 const struct iwl_mei_colloc_info *colloc_info)
1381 struct iwl_sap_notif_host_link_up msg = {
1382 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
1383 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1385 .ssid_len = cpu_to_le32(conn_info->ssid_len),
1386 .channel = conn_info->channel,
1387 .band = conn_info->band,
1388 .pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher),
1389 .auth_mode = cpu_to_le32(conn_info->auth_mode),
1392 struct iwl_mei *mei;
/* Refuse an SSID that would overflow the fixed-size SAP field */
1394 if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid))
1397 memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len);
1398 memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN);
/* Collocated AP info; channels <= 14 map to band 0 (2.4 GHz) */
1401 msg.colloc_channel = colloc_info->channel;
1402 msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1;
1403 memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN);
1406 mutex_lock(&iwl_mei_mutex);
1408 if (!iwl_mei_is_connected())
1411 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
/* CSME only cares about link state when AMT is enabled */
1416 if (!mei->amt_enabled)
1419 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
/* Cache the connection info for replay on SAP (re-)connection */
1422 kfree(iwl_mei_cache.conn_info);
1423 iwl_mei_cache.conn_info =
1424 kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL);
1425 mutex_unlock(&iwl_mei_mutex);
1427 EXPORT_SYMBOL_GPL(iwl_mei_host_associated);
/*
 * Notify CSME that the host wifi driver disconnected from the AP
 * (SAP_MSG_NOTIF_HOST_LINK_DOWN) and drop the cached connection info.
 */
1429 void iwl_mei_host_disassociated(void)
1431 struct iwl_mei *mei;
1432 struct iwl_sap_notif_host_link_down msg = {
1433 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN),
1434 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
/* NOTE(review): "LONG" down type semantics defined by the SAP spec */
1435 .type = HOST_LINK_DOWN_TYPE_LONG,
1438 mutex_lock(&iwl_mei_mutex);
1440 if (!iwl_mei_is_connected())
1443 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1448 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
/* No connection anymore - nothing to replay on reconnection */
1451 kfree(iwl_mei_cache.conn_info);
1452 iwl_mei_cache.conn_info = NULL;
1453 mutex_unlock(&iwl_mei_mutex);
1455 EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated);
/*
 * Report the current rfkill state to CSME and cache it. The SAP message
 * carries "deasserted" bits, i.e. a set bit means that kill switch is
 * NOT active (radio allowed).
 */
1457 void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
1459 struct iwl_mei *mei;
1460 u32 rfkill_state = 0;
1461 struct iwl_sap_msg_dw msg = {
1462 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
1463 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1467 rfkill_state |= SAP_SW_RFKILL_DEASSERTED;
1470 rfkill_state |= SAP_HW_RFKILL_DEASSERTED;
1472 mutex_lock(&iwl_mei_mutex);
1474 if (!iwl_mei_is_connected())
1477 msg.val = cpu_to_le32(rfkill_state);
1479 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1484 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
/* Cache for replay on SAP (re-)connection */
1487 iwl_mei_cache.rf_kill = rfkill_state;
1488 mutex_unlock(&iwl_mei_mutex);
1490 EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state);
/*
 * Send the NIC's current MAC address and the NVM-stored address to CSME
 * (SAP_MSG_NOTIF_NIC_INFO) and cache both for replay on reconnection.
 * Both pointers must reference ETH_ALEN bytes.
 */
1492 void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
1494 struct iwl_mei *mei;
1495 struct iwl_sap_notif_host_nic_info msg = {
1496 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
1497 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1500 mutex_lock(&iwl_mei_mutex);
1502 if (!iwl_mei_is_connected())
1505 ether_addr_copy(msg.mac_address, mac_address);
1506 ether_addr_copy(msg.nvm_address, nvm_address);
1508 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1513 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
/* Cache for replay on SAP (re-)connection */
1516 ether_addr_copy(iwl_mei_cache.mac_address, mac_address);
1517 ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address);
1518 mutex_unlock(&iwl_mei_mutex);
1520 EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info);
/*
 * Send the current regulatory country code (MCC) to CSME
 * (SAP_MSG_NOTIF_COUNTRY_CODE) and cache it for replay on reconnection.
 */
1522 void iwl_mei_set_country_code(u16 mcc)
1524 struct iwl_mei *mei;
1525 struct iwl_sap_notif_country_code msg = {
1526 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
1527 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1528 .mcc = cpu_to_le16(mcc),
1531 mutex_lock(&iwl_mei_mutex);
1533 if (!iwl_mei_is_connected())
1536 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1541 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
/* Cache for replay on SAP (re-)connection */
1544 iwl_mei_cache.mcc = mcc;
1545 mutex_unlock(&iwl_mei_mutex);
1547 EXPORT_SYMBOL_GPL(iwl_mei_set_country_code);
/*
 * Send the SAR power limit table to CSME (SAP_MSG_NOTIF_SAR_LIMITS) and
 * cache a copy for replay on reconnection.
 * @power_limit: assumed to point to at least
 * sizeof(msg.sar_chain_info_table) bytes of __le16 entries — TODO
 * confirm against the caller.
 */
1549 void iwl_mei_set_power_limit(const __le16 *power_limit)
1551 struct iwl_mei *mei;
1552 struct iwl_sap_notif_sar_limits msg = {
1553 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
1554 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1557 mutex_lock(&iwl_mei_mutex);
1559 if (!iwl_mei_is_connected())
1562 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1567 memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table));
1569 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
/* Cache for replay on SAP (re-)connection */
1572 kfree(iwl_mei_cache.power_limit);
1573 iwl_mei_cache.power_limit = kmemdup(power_limit,
1574 sizeof(msg.sar_chain_info_table), GFP_KERNEL);
1575 mutex_unlock(&iwl_mei_mutex);
1577 EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit);
/*
 * Publish (or clear, with netdev == NULL) the net_device that iwlmei
 * uses to pass data traffic on behalf of CSME. Our rx handler is
 * unregistered from the previously cached netdev and registered on the
 * new one when AMT is enabled.
 */
1579 void iwl_mei_set_netdev(struct net_device *netdev)
1581 struct iwl_mei *mei;
1583 mutex_lock(&iwl_mei_mutex);
/* No SAP connection: just remember the netdev for later */
1585 if (!iwl_mei_is_connected()) {
1586 rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1590 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
/* iwl_mei_mutex protects writers of iwl_mei_cache.netdev */
1596 struct net_device *dev =
1597 rcu_dereference_protected(iwl_mei_cache.netdev,
1598 lockdep_is_held(&iwl_mei_mutex));
/* Detach our rx handler from the old netdev */
1603 netdev_rx_handler_unregister(dev);
1606 rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
/* Only intercept traffic when AMT is enabled */
1608 if (netdev && mei->amt_enabled)
1609 netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
1612 mutex_unlock(&iwl_mei_mutex);
1614 EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);
/*
 * Called when the wifi device goes down: if CSME previously asked to
 * take ownership of the NIC, confirm the hand-over now that the host
 * has released it.
 */
1616 void iwl_mei_device_down(void)
1618 struct iwl_mei *mei;
1620 mutex_lock(&iwl_mei_mutex);
1622 if (!iwl_mei_is_connected())
1625 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
/* Nothing to confirm if CSME isn't waiting for the NIC */
1630 if (!mei->csme_taking_ownership)
1633 iwl_mei_send_sap_msg(mei->cldev,
1634 SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
/* Hand-over acknowledged - clear the pending flag */
1635 mei->csme_taking_ownership = false;
1637 mutex_unlock(&iwl_mei_mutex);
1639 EXPORT_SYMBOL_GPL(iwl_mei_device_down);
/*
 * Register the wifi driver's callbacks with iwlmei. Only one
 * registration is allowed at a time (priv/ops act as the "busy"
 * markers). If a SAP connection already exists, immediately announce
 * the wifi driver to CSME and report the current link protection state
 * through ops->rfkill.
 */
1641 int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
1646 * We must have a non-NULL priv pointer to not crash when there are
1647 * multiple WiFi devices.
1652 mutex_lock(&iwl_mei_mutex);
1654 /* do not allow registration if someone else already registered */
1655 if (iwl_mei_cache.priv || iwl_mei_cache.ops) {
1660 iwl_mei_cache.priv = priv;
1661 iwl_mei_cache.ops = ops;
1663 if (iwl_mei_global_cldev) {
1664 struct iwl_mei *mei =
1665 mei_cldev_get_drvdata(iwl_mei_global_cldev);
1667 /* we have already a SAP connection */
1668 if (iwl_mei_is_connected()) {
1669 iwl_mei_send_sap_msg(mei->cldev,
1670 SAP_MSG_NOTIF_WIFIDR_UP);
/* Sync the new registrant with the current rfkill state */
1671 ops->rfkill(priv, mei->link_prot_state);
1677 mutex_unlock(&iwl_mei_mutex);
1680 EXPORT_SYMBOL_GPL(iwl_mei_register);
/*
 * First phase of wifi driver unregistration: drop the cached
 * connection/power-limit state and the ops pointer.
 * iwl_mei_cache.priv is deliberately left set until
 * iwl_mei_unregister_complete() so no new registration can slip in
 * between the two phases.
 */
1682 void iwl_mei_start_unregister(void)
1684 mutex_lock(&iwl_mei_mutex);
1686 /* At this point, the wifi driver should have removed the netdev */
1687 if (rcu_access_pointer(iwl_mei_cache.netdev))
1688 pr_err("Still had a netdev pointer set upon unregister\n");
1690 kfree(iwl_mei_cache.conn_info);
1691 iwl_mei_cache.conn_info = NULL;
1692 kfree(iwl_mei_cache.power_limit);
1693 iwl_mei_cache.power_limit = NULL;
1694 iwl_mei_cache.ops = NULL;
1695 /* leave iwl_mei_cache.priv non-NULL to prevent any new registration */
1697 mutex_unlock(&iwl_mei_mutex);
1699 EXPORT_SYMBOL_GPL(iwl_mei_start_unregister);
/*
 * Second phase of unregistration: clear priv (allowing future
 * registrations) and tell CSME the wifi driver is gone so it can
 * reclaim the NIC; our ownership is forfeited.
 */
1701 void iwl_mei_unregister_complete(void)
1703 mutex_lock(&iwl_mei_mutex);
1705 iwl_mei_cache.priv = NULL;
1707 if (iwl_mei_global_cldev) {
1708 struct iwl_mei *mei =
1709 mei_cldev_get_drvdata(iwl_mei_global_cldev);
1711 iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN);
1712 mei->got_ownership = false;
1715 mutex_unlock(&iwl_mei_mutex);
1717 EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete);
1719 #if IS_ENABLED(CONFIG_DEBUG_FS)
/* debugfs write hook: manually (re-)send the SAP start message to CSME */
1722 iwl_mei_dbgfs_send_start_message_write(struct file *file,
1723 const char __user *user_buf,
1724 size_t count, loff_t *ppos)
1728 mutex_lock(&iwl_mei_mutex);
/* No MEI client device bound yet - nothing to send to */
1730 if (!iwl_mei_global_cldev) {
1735 ret = iwl_mei_send_start(iwl_mei_global_cldev);
1738 mutex_unlock(&iwl_mei_mutex);
/* On success report the whole buffer as consumed */
1739 return ret ?: count;
1742 static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = {
1743 .write = iwl_mei_dbgfs_send_start_message_write,
1744 .open = simple_open,
1745 .llseek = default_llseek,
/* debugfs write hook: request NIC ownership from CSME */
1748 static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file,
1749 const char __user *user_buf,
1750 size_t count, loff_t *ppos)
1752 iwl_mei_get_ownership();
1757 static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = {
1758 .write = iwl_mei_dbgfs_req_ownership_write,
1759 .open = simple_open,
1760 .llseek = default_llseek,
/* Create the iwlmei debugfs directory and its entries */
1763 static void iwl_mei_dbgfs_register(struct iwl_mei *mei)
1765 mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL)%;
1767 if (!mei->dbgfs_dir)
1770 debugfs_create_ulong("status", S_IRUSR,
1771 mei->dbgfs_dir, &iwl_mei_status);
1772 debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir,
1773 mei, &iwl_mei_dbgfs_send_start_message_ops);
1774 debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir,
1775 mei, &iwl_mei_dbgfs_req_ownership_ops);
/* Tear down the whole debugfs directory */
1778 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei)
1780 debugfs_remove_recursive(mei->dbgfs_dir);
1781 mei->dbgfs_dir = NULL;
/* Stubs when debugfs is compiled out */
1786 static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {}
1787 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
1789 #endif /* CONFIG_DEBUG_FS */
1791 #define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3
1794 * iwl_mei_probe - the probe function called by the mei bus enumeration
1796 * This allocates the data needed by iwlmei and sets a pointer to this data
1797 * into the mei_cl_device's drvdata.
1798 * It starts the SAP protocol by sending the SAP_ME_MSG_START without
1799 * waiting for the answer. The answer will be caught later by the Rx callback.
/*
 * Probe: allocate per-device state, set up waitqueues/work items,
 * allocate the DMA shared memory (with retries, since the CSME WLAN
 * client may not have booted yet), enable the MEI device and kick off
 * the SAP handshake. The error-unwind labels between the visible lines
 * are elided in this extract.
 */
1801 static int iwl_mei_probe(struct mei_cl_device *cldev,
1802 const struct mei_cl_device_id *id)
1804 int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
1805 struct iwl_mei *mei;
/* Device-managed allocation: freed automatically on driver detach */
1808 mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL);
1812 init_waitqueue_head(&mei->get_nvm_wq);
1813 INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk);
1814 INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
1815 iwl_mei_csa_throttle_end_wk);
1816 init_waitqueue_head(&mei->get_ownership_wq);
1817 spin_lock_init(&mei->data_q_lock);
1819 mei_cldev_set_drvdata(cldev, mei);
1823 ret = iwl_mei_alloc_shared_mem(cldev);
1827 * The CSME firmware needs to boot the internal WLAN client.
1828 * This can take time in certain configurations (usually
1829 * upon resume and when the whole CSME firmware is shut down
1832 * Wait a bit before retrying and hope we'll succeed next time.
1835 dev_dbg(&cldev->dev,
1836 "Couldn't allocate the shared memory: %d, attempt %d / %d\n",
1837 ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
1840 } while (alloc_retry);
/* All retries exhausted - give up on this device */
1843 dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
1848 iwl_mei_init_shared_mem(mei);
1850 ret = iwl_mei_enable(cldev);
1852 goto free_shared_mem;
1854 iwl_mei_dbgfs_register(mei);
1857 * We now have a Rx function in place, start the SAP protocol
1858 * we expect to get the SAP_ME_MSG_START_OK response later on.
1860 mutex_lock(&iwl_mei_mutex);
1861 ret = iwl_mei_send_start(cldev);
1862 mutex_unlock(&iwl_mei_mutex);
1864 goto debugfs_unregister;
/* Make the device visible to the iwlwifi-facing API */
1867 iwl_mei_global_cldev = cldev;
/* Error unwind */
1872 iwl_mei_dbgfs_unregister(mei);
1873 mei_cldev_disable(cldev);
1875 iwl_mei_free_shared_mem(cldev);
1877 mei_cldev_set_drvdata(cldev, NULL);
1878 devm_kfree(&cldev->dev, mei);
1883 #define SEND_SAP_MAX_WAIT_ITERATION 10
/*
 * Remove: tear everything down in an order that keeps CSME, the
 * netdev rx handler and the Tx path from touching freed state. The
 * error-prone part is telling CSME we are going down BEFORE unmapping
 * the shared memory it DMAs into.
 */
1885 static void iwl_mei_remove(struct mei_cl_device *cldev)
1887 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1891 * We are being removed while the bus is active, it means we are
1892 * going to suspend/ shutdown, so the NIC will disappear.
1894 if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops)
1895 iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);
1897 if (rcu_access_pointer(iwl_mei_cache.netdev)) {
1898 struct net_device *dev;
1901 * First take rtnl and only then the mutex to avoid an ABBA
1902 * with iwl_mei_set_netdev()
1905 mutex_lock(&iwl_mei_mutex);
1908 * If we are suspending and the wifi driver hasn't removed its netdev
1909 * yet, do it now. In any case, don't change the cache.netdev pointer.
1911 dev = rcu_dereference_protected(iwl_mei_cache.netdev,
1912 lockdep_is_held(&iwl_mei_mutex));
1914 netdev_rx_handler_unregister(dev);
1915 mutex_unlock(&iwl_mei_mutex);
1919 mutex_lock(&iwl_mei_mutex);
1922 * Tell CSME that we are going down so that it won't access the
1923 * memory anymore, make sure this message goes through immediately.
1925 mei->csa_throttled = false;
1926 iwl_mei_send_sap_msg(mei->cldev,
1927 SAP_MSG_NOTIF_HOST_GOES_DOWN);
/* Poll until CSME drained our host-to-ME queue (bounded wait) */
1929 for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
1930 if (!iwl_mei_host_to_me_data_pending(mei))
1937 * If we couldn't make sure that CSME saw the HOST_GOES_DOWN message,
1938 * it means that it will probably keep reading memory that we are going
1939 * to unmap and free, expect IOMMU error messages.
1941 if (i == SEND_SAP_MAX_WAIT_ITERATION)
1942 dev_err(&mei->cldev->dev,
1943 "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
1945 mutex_unlock(&iwl_mei_mutex);
1948 * This looks strange, but this lock is taken here to make sure that
1949 * iwl_mei_add_data_to_ring called from the Tx path sees that we
1950 * clear the IWL_MEI_STATUS_SAP_CONNECTED bit.
1951 * Rx isn't a problem because the rx_handler can't be called after
1952 * having been unregistered.
1954 spin_lock_bh(&mei->data_q_lock);
1955 clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
1956 spin_unlock_bh(&mei->data_q_lock);
/* NIC is CSME's now - report rfkill asserted to the wifi driver */
1958 if (iwl_mei_cache.ops)
1959 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
1962 * mei_cldev_disable will return only after all the MEI Rx is done.
1963 * It must be called when iwl_mei_mutex is *not* held, since it waits
1964 * for our Rx handler to complete.
1965 * After it returns, no new Rx will start.
1967 mei_cldev_disable(cldev);
1970 * Since the netdev was already removed and the netdev's removal
1971 * includes a call to synchronize_net() so that we know there won't be
1972 * any new Rx that will trigger the following workers.
1974 cancel_work_sync(&mei->send_csa_msg_wk);
1975 cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
1978 * If someone waits for the ownership, let him know that we are going
1979 * down and that we are not connected anymore. He'll be able to take
1982 wake_up_all(&mei->get_ownership_wq);
1984 mutex_lock(&iwl_mei_mutex);
1986 iwl_mei_global_cldev = NULL;
/* Release anyone still sleeping in iwl_mei_get_nvm() */
1988 wake_up_all(&mei->get_nvm_wq);
1990 iwl_mei_free_shared_mem(cldev);
1992 iwl_mei_dbgfs_unregister(mei);
1994 mei_cldev_set_drvdata(cldev, NULL);
1998 kfree(rcu_access_pointer(mei->filters));
2000 devm_kfree(&cldev->dev, mei);
2002 mutex_unlock(&iwl_mei_mutex);
/* MEI bus match table: bind to the CSME WLAN client by its UUID */
2005 static const struct mei_cl_device_id iwl_mei_tbl[] = {
2006 { KBUILD_MODNAME, MEI_WLAN_UUID, MEI_CL_VERSION_ANY},
2008 /* required last entry */
2013 * Do not export the device table because this module is loaded by
2014 * iwlwifi's dependency.
2017 static struct mei_cl_driver iwl_mei_cl_driver = {
2018 .id_table = iwl_mei_tbl,
2019 .name = KBUILD_MODNAME,
2020 .probe = iwl_mei_probe,
2021 .remove = iwl_mei_remove,
/* Registers the driver at module init, unregisters at exit */
2024 module_mei_cl_driver(iwl_mei_cl_driver);