drivers/net/wireless/intel/iwlwifi/mei/main.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2021-2022 Intel Corporation
4  */
5
6 #include <linux/etherdevice.h>
7 #include <linux/netdevice.h>
8 #include <linux/ieee80211.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/mei_cl_bus.h>
13 #include <linux/rcupdate.h>
14 #include <linux/debugfs.h>
15 #include <linux/skbuff.h>
16 #include <linux/wait.h>
17 #include <linux/slab.h>
18 #include <linux/mm.h>
19
20 #include <net/cfg80211.h>
21
22 #include "internal.h"
23 #include "iwl-mei.h"
24 #include "trace.h"
25 #include "trace-data.h"
26 #include "sap.h"
27
28 MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface");
29 MODULE_LICENSE("GPL");
30
31 #define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \
32                               0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65)
33
34 /*
35  * Since iwlwifi calls iwlmei without any context, hold a pointer to the
36  * mei_cl_device structure here.
37  * Define a mutex that will synchronize all the flows between iwlwifi and
38  * iwlmei.
39  * Note that iwlmei can't have several instances, so it is OK to have static
40  * variables here.
41  */
42 static struct mei_cl_device *iwl_mei_global_cldev;
43 static DEFINE_MUTEX(iwl_mei_mutex);
44 static unsigned long iwl_mei_status;
45
46 enum iwl_mei_status_bits {
47         IWL_MEI_STATUS_SAP_CONNECTED,
48 };
49
50 bool iwl_mei_is_connected(void)
51 {
52         return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
53 }
54 EXPORT_SYMBOL_GPL(iwl_mei_is_connected);
55
56 #define SAP_VERSION     3
57 #define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */
58
59 struct iwl_sap_q_ctrl_blk {
60         __le32 wr_ptr;
61         __le32 rd_ptr;
62         __le32 size;
63 };
64
65 enum iwl_sap_q_idx {
66         SAP_QUEUE_IDX_NOTIF = 0,
67         SAP_QUEUE_IDX_DATA,
68         SAP_QUEUE_IDX_MAX,
69 };
70
71 struct iwl_sap_dir {
72         __le32 reserved;
73         struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX];
74 };
75
76 enum iwl_sap_dir_idx {
77         SAP_DIRECTION_HOST_TO_ME = 0,
78         SAP_DIRECTION_ME_TO_HOST,
79         SAP_DIRECTION_MAX,
80 };
81
82 struct iwl_sap_shared_mem_ctrl_blk {
83         __le32 sap_id;
84         __le32 size;
85         struct iwl_sap_dir dir[SAP_DIRECTION_MAX];
86 };
87
88 /*
89  * The shared area has the following layout:
90  *
91  * +-----------------------------------+
92  * |struct iwl_sap_shared_mem_ctrl_blk |
93  * +-----------------------------------+
94  * |Host -> ME notif queue             |
95  * +-----------------------------------+
96  * |Host -> ME data queue              |
97  * +-----------------------------------+
98  * |ME -> Host notif queue             |
99  * +-----------------------------------+
100  * |ME -> Host data queue              |
101  * +-----------------------------------+
102  * |SAP control block id (SAP!)        |
103  * +-----------------------------------+
104  */
105
106 #define SAP_H2M_DATA_Q_SZ       48256
107 #define SAP_M2H_DATA_Q_SZ       24128
108 #define SAP_H2M_NOTIF_Q_SZ      2240
109 #define SAP_M2H_NOTIF_Q_SZ      62720
110
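/*
 * Total shared area size: the control block, the four queues, and a
 * trailing __le32 holding the SAP control block ID (see the layout above).
 */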
111 #define _IWL_MEI_SAP_SHARED_MEM_SZ \
112         (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
113          SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \
114          SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)
115
116 #define IWL_MEI_SAP_SHARED_MEM_SZ \
117         (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE))
118
119 struct iwl_mei_shared_mem_ptrs {
120         struct iwl_sap_shared_mem_ctrl_blk *ctrl;
121         void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
122         size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
123 };
124
125 struct iwl_mei_filters {
126         struct rcu_head rcu_head;
127         struct iwl_sap_oob_filters filters;
128 };
129
130 /**
131  * struct iwl_mei - holds the private data for iwl_mei
132  *
133  * @get_nvm_wq: the wait queue for the get_nvm flow
134  * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA
135  *      message. Used so that we can send CHECK_SHARED_AREA from atomic
136  *      contexts.
137  * @get_ownership_wq: the wait queue for the get_ownership flow
138  * @shared_mem: the memory that is shared between CSME and the host
139  * @cldev: the pointer to the MEI client device
140  * @nvm: the data returned by the CSME for the NVM
141  * @filters: the filters sent by CSME
142  * @got_ownership: true if we own the device
143  * @amt_enabled: true if CSME has wireless enabled
144  * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI
145  *      bus, but rather need to wait until csa_throttle_end_wk runs
146  * @csme_taking_ownership: true when CSME is taking ownership. Used to remember
147  *      to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
148  *      flow.
149  * @link_prot_state: true when we are in link protection PASSIVE
150  * @csa_throttle_end_wk: used when &csa_throttled is true
151  * @data_q_lock: protects the access to the data queues which are
152  *      accessed without the mutex.
153  * @sap_seq_no: the sequence number for the SAP messages
154  * @seq_no: the sequence number for the messages sent over the MEI bus
155  * @dbgfs_dir: the debugfs dir entry
156  */
157 struct iwl_mei {
158         wait_queue_head_t get_nvm_wq;
159         struct work_struct send_csa_msg_wk;
160         wait_queue_head_t get_ownership_wq;
161         struct iwl_mei_shared_mem_ptrs shared_mem;
162         struct mei_cl_device *cldev;
163         struct iwl_mei_nvm *nvm;
164         struct iwl_mei_filters __rcu *filters;
165         bool got_ownership;
166         bool amt_enabled;
167         bool csa_throttled;
168         bool csme_taking_ownership;
169         bool link_prot_state;
170         struct delayed_work csa_throttle_end_wk;
171         spinlock_t data_q_lock;
172
173         atomic_t sap_seq_no;
174         atomic_t seq_no;
175
176         struct dentry *dbgfs_dir;
177 };
178
179 /**
180  * struct iwl_mei_cache - cache for the parameters from iwlwifi
181  * @ops: Callbacks to iwlwifi.
182  * @netdev: The netdev that will be used to transmit / receive packets.
183  * @conn_info: The connection info message triggered by iwlwifi's association.
184  * @power_limit: pointer to an array of 10 elements (le16) representing the power
185  *      restrictions per chain.
186  * @rf_kill: rf kill state.
187  * @mcc: MCC info
188  * @mac_address: interface MAC address.
189  * @nvm_address: NVM MAC address.
190  * @priv: A pointer to iwlwifi.
191  *
192  * This is used to cache the configuration coming from iwlwifi. The data
193  * is cached here so that we can buffer the configuration even if we don't have
194  * a binding from the mei bus and hence no iwl_mei structure.
195  */
196 struct iwl_mei_cache {
197         const struct iwl_mei_ops *ops;
198         struct net_device __rcu *netdev;
199         const struct iwl_sap_notif_connection_info *conn_info;
200         const __le16 *power_limit;
201         u32 rf_kill;
202         u16 mcc;
203         u8 mac_address[6];
204         u8 nvm_address[6];
205         void *priv;
206 };
207
208 static struct iwl_mei_cache iwl_mei_cache = {
209         .rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED
210 };
211
212 static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev)
213 {
214         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
215
216         if (mei_cldev_dma_unmap(cldev))
217                 dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n");
218         memset(&mei->shared_mem, 0, sizeof(mei->shared_mem));
219 }
220
221 #define HBM_DMA_BUF_ID_WLAN 1
222
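/*
 * Allocate the SAP shared area by DMA-mapping it over the MEI client bus
 * (buffer ID HBM_DMA_BUF_ID_WLAN) and zero it before it is used.
 */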
223 static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
224 {
225         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
226         struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
227
228         mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN,
229                                        IWL_MEI_SAP_SHARED_MEM_SZ);
230
231         if (IS_ERR(mem->ctrl)) {
232                 int ret = PTR_ERR(mem->ctrl);
233
234                 mem->ctrl = NULL;
235
236                 return ret;
237         }
238
239         memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ);
240
241         return 0;
242 }
243
244 static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
245 {
246         struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
247         struct iwl_sap_dir *h2m;
248         struct iwl_sap_dir *m2h;
249         int dir, queue;
250         u8 *q_head;
251
252         mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
253
254         mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl));
255
256         h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
257         m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
258
259         h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
260                 cpu_to_le32(SAP_H2M_DATA_Q_SZ);
261         h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
262                 cpu_to_le32(SAP_H2M_NOTIF_Q_SZ);
263         m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
264                 cpu_to_le32(SAP_M2H_DATA_Q_SZ);
265         m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
266                 cpu_to_le32(SAP_M2H_NOTIF_Q_SZ);
267
268         /* q_head points to the start of the first queue */
269         q_head = (void *)(mem->ctrl + 1);
270
271         /* Initialize the queue heads */
272         for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) {
273                 for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) {
274                         mem->q_head[dir][queue] = q_head;
275                         q_head +=
276                                 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
277                         mem->q_size[dir][queue] =
278                                 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
279                 }
280         }
281
282         *(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
283 }
284
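/*
 * Copy one SAP message (header + payload) into a host-to-ME cyclic buffer:
 * validate the read/write pointers, check that there is enough room, copy
 * with wrap-around if needed and advance the write pointer.
 */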
285 static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
286                                         struct iwl_sap_q_ctrl_blk *notif_q,
287                                         u8 *q_head,
288                                         const struct iwl_sap_hdr *hdr,
289                                         u32 q_sz)
290 {
291         u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
292         u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
293         size_t room_in_buf;
294         size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len);
295
296         if (rd > q_sz || wr > q_sz) {
297                 dev_err(&cldev->dev,
298                         "Pointers are past the end of the buffer\n");
299                 return -EINVAL;
300         }
301
302         room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;
303
304         /* we don't have enough room for the data to write */
305         if (room_in_buf < tx_sz) {
306                 dev_err(&cldev->dev,
307                         "Not enough room in the buffer\n");
308                 return -ENOSPC;
309         }
310
311         if (wr + tx_sz <= q_sz) {
312                 memcpy(q_head + wr, hdr, tx_sz);
313         } else {
314                 memcpy(q_head + wr, hdr, q_sz - wr);
315                 memcpy(q_head, (u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
316         }
317
318         WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
319         return 0;
320 }
321
322 static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei)
323 {
324         struct iwl_sap_q_ctrl_blk *notif_q;
325         struct iwl_sap_dir *dir;
326
327         dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
328         notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
329
330         if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr))
331                 return true;
332
333         notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
334         return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr);
335 }
336
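/*
 * Tell the CSME firmware to look at the shared area. To avoid flooding the
 * MEI bus, CHECK_SHARED_AREA is throttled: once one is sent, further
 * requests are suppressed for 100ms and csa_throttle_end_wk re-sends one
 * if host-to-ME data is still pending when the throttling window ends.
 */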
337 static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev)
338 {
339         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
340         struct iwl_sap_me_msg_start msg = {
341                 .hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA),
342                 .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
343         };
344         int ret;
345
346         lockdep_assert_held(&iwl_mei_mutex);
347
348         if (mei->csa_throttled)
349                 return 0;
350
351         trace_iwlmei_me_msg(&msg.hdr, true);
352         ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
353         if (ret != sizeof(msg)) {
354                 dev_err(&cldev->dev,
355                         "failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n",
356                         ret);
357                 return ret;
358         }
359
360         mei->csa_throttled = true;
361
362         schedule_delayed_work(&mei->csa_throttle_end_wk,
363                               msecs_to_jiffies(100));
364
365         return 0;
366 }
367
368 static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk)
369 {
370         struct iwl_mei *mei =
371                 container_of(wk, struct iwl_mei, csa_throttle_end_wk.work);
372
373         mutex_lock(&iwl_mei_mutex);
374
375         mei->csa_throttled = false;
376
377         if (iwl_mei_host_to_me_data_pending(mei))
378                 iwl_mei_send_check_shared_area(mei->cldev);
379
380         mutex_unlock(&iwl_mei_mutex);
381 }
382
383 static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev,
384                                         struct iwl_sap_hdr *hdr)
385 {
386         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
387         struct iwl_sap_q_ctrl_blk *notif_q;
388         struct iwl_sap_dir *dir;
389         void *q_head;
390         u32 q_sz;
391         int ret;
392
393         lockdep_assert_held(&iwl_mei_mutex);
394
395         if (!mei->shared_mem.ctrl) {
396                 dev_err(&cldev->dev,
397                         "No shared memory, can't send any SAP message\n");
398                 return -EINVAL;
399         }
400
401         if (!iwl_mei_is_connected()) {
402                 dev_err(&cldev->dev,
403                         "Can't send a SAP message if we're not connected\n");
404                 return -ENODEV;
405         }
406
407         hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
408         dev_dbg(&cldev->dev, "Sending %d\n", hdr->type);
409
410         dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
411         notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
412         q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
413         q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
414         ret = iwl_mei_write_cyclic_buf(cldev, notif_q, q_head, hdr, q_sz);
415
416         if (ret < 0)
417                 return ret;
418
419         trace_iwlmei_sap_cmd(hdr, true);
420
421         return iwl_mei_send_check_shared_area(cldev);
422 }
423
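/*
 * Copy an skb, prefixed with a SAP header, onto the host-to-ME data queue.
 * When @cb_tx is true the packet comes from the Tx path (a DHCP packet)
 * and gets the larger "CB data" header; otherwise it is a frame received
 * from the air that is being forwarded to CSME. The caller still has to
 * trigger CHECK_SHARED_AREA for CSME to notice the new data.
 */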
424 void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
425 {
426         struct iwl_sap_q_ctrl_blk *notif_q;
427         struct iwl_sap_dir *dir;
428         struct iwl_mei *mei;
429         size_t room_in_buf;
430         size_t tx_sz;
431         size_t hdr_sz;
432         u32 q_sz;
433         u32 rd;
434         u32 wr;
435         void *q_head;
436
437         if (!iwl_mei_global_cldev)
438                 return;
439
440         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
441
442         /*
443          * We access this path for Rx packets (the more common case)
444          * and from the Tx path when we send DHCP packets; the latter is
445          * very unlikely.
446          * Take the lock here already so that we observe it if remove()
447          * has cleared the IWL_MEI_STATUS_SAP_CONNECTED bit.
448          */
449         spin_lock_bh(&mei->data_q_lock);
450
451         if (!iwl_mei_is_connected()) {
452                 spin_unlock_bh(&mei->data_q_lock);
453                 return;
454         }
455
456         /*
457          * We are in an RCU critical section, and the removal from the CSME bus
458          * which would free this memory waits for the readers to complete (this
459          * is done in netdev_rx_handler_unregister()).
460          */
461         dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
462         notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
463         q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
464         q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
465
466         rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
467         wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
468         hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) :
469                          sizeof(struct iwl_sap_hdr);
470         tx_sz = skb->len + hdr_sz;
471
472         if (rd > q_sz || wr > q_sz) {
473                 dev_err(&mei->cldev->dev,
474                         "can't write the data: pointers are past the end of the buffer\n");
475                 goto out;
476         }
477
478         room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;
479
480         /* we don't have enough room for the data to write */
481         if (room_in_buf < tx_sz) {
482                 dev_err(&mei->cldev->dev,
483                         "Not enough room in the buffer for this data\n");
484                 goto out;
485         }
486
487         if (skb_headroom(skb) < hdr_sz) {
488                 dev_err(&mei->cldev->dev,
489                         "Not enough headroom in the skb to write the SAP header\n");
490                 goto out;
491         }
492
493         if (cb_tx) {
494                 struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr));
495
496                 cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET);
497                 cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr));
498                 cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
499                 cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX));
500                 cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr));
501                 trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP);
502         } else {
503                 struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr));
504
505                 hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET);
506                 hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
507                 hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
508                 trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR);
509         }
510
511         if (wr + tx_sz <= q_sz) {
512                 skb_copy_bits(skb, 0, q_head + wr, tx_sz);
513         } else {
514                 skb_copy_bits(skb, 0, q_head + wr, q_sz - wr);
515                 skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr));
516         }
517
518         WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
519
520 out:
521         spin_unlock_bh(&mei->data_q_lock);
522 }
523
524 static int
525 iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type)
526 {
527         struct iwl_sap_hdr msg = {
528                 .type = cpu_to_le16(type),
529         };
530
531         return iwl_mei_send_sap_msg_payload(cldev, &msg);
532 }
533
534 static void iwl_mei_send_csa_msg_wk(struct work_struct *wk)
535 {
536         struct iwl_mei *mei =
537                 container_of(wk, struct iwl_mei, send_csa_msg_wk);
538
539         if (!iwl_mei_is_connected())
540                 return;
541
542         mutex_lock(&iwl_mei_mutex);
543
544         iwl_mei_send_check_shared_area(mei->cldev);
545
546         mutex_unlock(&iwl_mei_mutex);
547 }
548
549 /* Called in a RCU read critical section from netif_receive_skb */
550 static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
551 {
552         struct sk_buff *skb = *pskb;
553         struct iwl_mei *mei =
554                 rcu_dereference(skb->dev->rx_handler_data);
555         struct iwl_mei_filters *filters = rcu_dereference(mei->filters);
556         bool rx_for_csme = false;
557         rx_handler_result_t res;
558
559         /*
560          * remove() unregisters this handler and calls synchronize_net(), so this
561          * should never happen.
562          */
563         if (!iwl_mei_is_connected()) {
564                 dev_err(&mei->cldev->dev,
565                         "Got an Rx packet, but we're not connected to SAP?\n");
566                 return RX_HANDLER_PASS;
567         }
568
569         if (filters)
570                 res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme);
571         else
572                 res = RX_HANDLER_PASS;
573
574         /*
575          * The data is already on the ring of the shared area, all we
576          * need to do is to tell the CSME firmware to check what we have
577          * there.
578          */
579         if (rx_for_csme)
580                 schedule_work(&mei->send_csa_msg_wk);
581
582         if (res != RX_HANDLER_PASS) {
583                 trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR);
584                 dev_kfree_skb(skb);
585         }
586
587         return res;
588 }
589
590 static void
591 iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
592                            const struct iwl_sap_me_msg_start_ok *rsp,
593                            ssize_t len)
594 {
595         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
596
597         if (len != sizeof(*rsp)) {
598                 dev_err(&cldev->dev,
599                         "got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
600                 dev_err(&cldev->dev,
601                         "size is incorrect: %zd instead of %zu\n",
602                         len, sizeof(*rsp));
603                 return;
604         }
605
606         if (rsp->supported_version != SAP_VERSION) {
607                 dev_err(&cldev->dev,
608                         "didn't get the expected version: got %d\n",
609                         rsp->supported_version);
610                 return;
611         }
612
613         mutex_lock(&iwl_mei_mutex);
614         set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
615         /* wifi driver has registered already */
616         if (iwl_mei_cache.ops) {
617                 iwl_mei_send_sap_msg(mei->cldev,
618                                      SAP_MSG_NOTIF_WIFIDR_UP);
619                 iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
620         }
621
622         mutex_unlock(&iwl_mei_mutex);
623 }
624
625 static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev,
626                                         const struct iwl_sap_csme_filters *filters)
627 {
628         struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
629         struct iwl_mei_filters *new_filters;
630         struct iwl_mei_filters *old_filters;
631
632         old_filters =
633                 rcu_dereference_protected(mei->filters,
634                                           lockdep_is_held(&iwl_mei_mutex));
635
636         new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL);
637         if (!new_filters)
638                 return;
639
640         /* Copy the OOB filters */
641         new_filters->filters = filters->filters;
642
643         rcu_assign_pointer(mei->filters, new_filters);
644
645         if (old_filters)
646                 kfree_rcu(old_filters, rcu_head);
647 }
648
649 static void
650 iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
651                            const struct iwl_sap_notif_conn_status *status)
652 {
653         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
654         struct iwl_mei_conn_info conn_info = {
655                 .lp_state = le32_to_cpu(status->link_prot_state),
656                 .ssid_len = le32_to_cpu(status->conn_info.ssid_len),
657                 .channel = status->conn_info.channel,
658                 .band = status->conn_info.band,
659                 .auth_mode = le32_to_cpu(status->conn_info.auth_mode),
660                 .pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher),
661         };
662
663         if (!iwl_mei_cache.ops ||
664             conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid))
665                 return;
666
667         memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len);
668         ether_addr_copy(conn_info.bssid, status->conn_info.bssid);
669
670         iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
671
672         mei->link_prot_state = status->link_prot_state;
673
674         /*
675          * If the host owns the device, inform user space whether it can
676          * roam.
677          * If the host does not own the device, update the rfkill state:
678          * when we are in link protection, ask not to touch the device;
679          * otherwise, unblock rfkill.
680          */
681         if (mei->got_ownership)
682                 iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv,
683                                                      status->link_prot_state);
684         else
685                 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv,
686                                           status->link_prot_state);
687 }
688
689 static void iwl_mei_set_init_conf(struct iwl_mei *mei)
690 {
691         struct iwl_sap_notif_host_link_up link_msg = {
692                 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
693                 .hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)),
694         };
695         struct iwl_sap_notif_country_code mcc_msg = {
696                 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
697                 .hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)),
698                 .mcc = cpu_to_le16(iwl_mei_cache.mcc),
699         };
700         struct iwl_sap_notif_sar_limits sar_msg = {
701                 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
702                 .hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)),
703         };
704         struct iwl_sap_notif_host_nic_info nic_info_msg = {
705                 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
706                 .hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)),
707         };
708         struct iwl_sap_msg_dw rfkill_msg = {
709                 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
710                 .hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)),
711                 .val = cpu_to_le32(iwl_mei_cache.rf_kill),
712         };
713
714         iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);
715
716         if (iwl_mei_cache.conn_info) {
717                 link_msg.conn_info = *iwl_mei_cache.conn_info;
718                 iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr);
719         }
720
721         iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr);
722
723         if (iwl_mei_cache.power_limit) {
724                 memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit,
725                        sizeof(sar_msg.sar_chain_info_table));
726                 iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr);
727         }
728
729         ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address);
730         ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address);
731         iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr);
732
733         iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr);
734 }
735
736 static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
737                                      const struct iwl_sap_msg_dw *dw)
738 {
739         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
740         struct net_device *netdev;
741
742         /*
743          * First take rtnl and only then the mutex to avoid an ABBA deadlock
744          * with iwl_mei_set_netdev().
745          */
746         rtnl_lock();
747         mutex_lock(&iwl_mei_mutex);
748
749         netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
750                                            lockdep_is_held(&iwl_mei_mutex));
751
752         if (mei->amt_enabled == !!le32_to_cpu(dw->val))
753                 goto out;
754
755         mei->amt_enabled = !!le32_to_cpu(dw->val);
756
757         if (mei->amt_enabled) {
758                 if (netdev)
759                         netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
760
761                 iwl_mei_set_init_conf(mei);
762         } else {
763                 if (iwl_mei_cache.ops)
764                         iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
765                 if (netdev)
766                         netdev_rx_handler_unregister(netdev);
767         }
768
769 out:
770         mutex_unlock(&iwl_mei_mutex);
771         rtnl_unlock();
772 }
773
774 static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
775                                      const struct iwl_sap_msg_dw *dw)
776 {
777         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
778
779         mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME);
780 }
781
782 static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev,
783                                                  const void *payload)
784 {
785         /* We can get ownership and driver is registered, go ahead */
786         if (iwl_mei_cache.ops)
787                 iwl_mei_send_sap_msg(cldev,
788                                      SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
789 }
790
791 static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
792                                                  const void *payload)
793 {
794         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
795
796         dev_info(&cldev->dev, "CSME takes ownership\n");
797
798         mei->got_ownership = false;
799
800         /*
801          * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi driver
802          * is finished taking the device down.
803          */
804         mei->csme_taking_ownership = true;
805
806         if (iwl_mei_cache.ops)
807                 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true);
808 }
809
810 static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
811                                const struct iwl_sap_nvm *sap_nvm)
812 {
813         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
814         const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm;
815         int i;
816
817         kfree(mei->nvm);
818         mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL);
819         if (!mei->nvm)
820                 return;
821
822         ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr);
823         mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs;
824         mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg);
825         mei->nvm->caps = le32_to_cpu(sap_nvm->caps);
826         mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version);
827
828         for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++)
829                 mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]);
830
831         wake_up_all(&mei->get_nvm_wq);
832 }
833
834 static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev,
835                                            const struct iwl_sap_msg_dw *dw)
836 {
837         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
838
839         /*
840          * This means that we can't use the wifi device right now, CSME is not
841          * ready to let us use it.
842          */
843         if (!dw->val) {
844                 dev_info(&cldev->dev, "Ownership req denied\n");
845                 return;
846         }
847
848         mei->got_ownership = true;
849         wake_up_all(&mei->get_ownership_wq);
850
851         iwl_mei_send_sap_msg(cldev,
852                              SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED);
853
854         /* We can now start the connection, unblock rfkill */
855         if (iwl_mei_cache.ops)
856                 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
857 }
858
859 static void iwl_mei_handle_ping(struct mei_cl_device *cldev,
860                                 const struct iwl_sap_hdr *hdr)
861 {
862         iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG);
863 }
864
865 static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev,
866                                    const struct iwl_sap_hdr *hdr)
867 {
868         u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr);
869         u16 type = le16_to_cpu(hdr->type);
870
871         dev_dbg(&cldev->dev,
872                 "Got a new SAP message: type %d, len %d, seq %d\n",
873                 le16_to_cpu(hdr->type), len,
874                 le32_to_cpu(hdr->seq_num));
875
876 #define SAP_MSG_HANDLER(_cmd, _handler, _sz)                            \
877         case SAP_MSG_NOTIF_ ## _cmd:                                    \
878                 if (len < _sz) {                                        \
879                         dev_err(&cldev->dev,                            \
880                                 "Bad size for %d: %u < %u\n",           \
881                                 le16_to_cpu(hdr->type),                 \
882                                 (unsigned int)len,                      \
883                                 (unsigned int)_sz);                     \
884                         break;                                          \
885                 }                                                       \
886                 mutex_lock(&iwl_mei_mutex);                             \
887                 _handler(cldev, (const void *)hdr);                     \
888                 mutex_unlock(&iwl_mei_mutex);                           \
889                 break
890
891 #define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz)                    \
892         case SAP_MSG_NOTIF_ ## _cmd:                                    \
893                 if (len < _sz) {                                        \
894                         dev_err(&cldev->dev,                            \
895                                 "Bad size for %d: %u < %u\n",           \
896                                 le16_to_cpu(hdr->type),                 \
897                                 (unsigned int)len,                      \
898                                 (unsigned int)_sz);                     \
899                         break;                                          \
900                 }                                                       \
901                 _handler(cldev, (const void *)hdr);                     \
902                 break
903
904 #define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz)                           \
905         case SAP_MSG_NOTIF_ ## _cmd:                                    \
906                 if (len < _sz) {                                        \
907                         dev_err(&cldev->dev,                            \
908                                 "Bad size for %d: %u < %u\n",           \
909                                 le16_to_cpu(hdr->type),                 \
910                                 (unsigned int)len,                      \
911                                 (unsigned int)_sz);                     \
912                         break;                                          \
913                 }                                                       \
914                 break
915
916         switch (type) {
917         SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0);
918         SAP_MSG_HANDLER(CSME_FILTERS,
919                         iwl_mei_handle_csme_filters,
920                         sizeof(struct iwl_sap_csme_filters));
921         SAP_MSG_HANDLER(CSME_CONN_STATUS,
922                         iwl_mei_handle_conn_status,
923                         sizeof(struct iwl_sap_notif_conn_status));
924         SAP_MSG_HANDLER_NO_LOCK(AMT_STATE,
925                                 iwl_mei_handle_amt_state,
926                                 sizeof(struct iwl_sap_msg_dw));
927         SAP_MSG_HANDLER_NO_HANDLER(PONG, 0);
928         SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm,
929                         sizeof(struct iwl_sap_nvm));
930         SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ,
931                         iwl_mei_handle_rx_host_own_req,
932                         sizeof(struct iwl_sap_msg_dw));
933         SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner,
934                         sizeof(struct iwl_sap_msg_dw));
935         SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP,
936                         iwl_mei_handle_can_release_ownership, 0);
937         SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP,
938                         iwl_mei_handle_csme_taking_ownership, 0);
939         default:
940         /*
941          * This is not really an error; there are messages that we decided
942          * to ignore, yet it is useful to be able to leave a note if debugging
943          * is enabled.
944          */
945         dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n",
946                 le16_to_cpu(hdr->type), len);
947         }
948
949 #undef SAP_MSG_HANDLER
950 #undef SAP_MSG_HANDLER_NO_LOCK
951 }
952
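/*
 * Read @len bytes from a cyclic buffer into @_buf, handling wrap-around,
 * and advance the caller's read pointer accordingly.
 */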
953 static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz,
954                                 u32 *_rd, u32 wr,
955                                 void *_buf, u32 len)
956 {
957         u8 *buf = _buf;
958         u32 rd = *_rd;
959
960         if (rd + len <= q_sz) {
961                 memcpy(buf, q_head + rd, len);
962                 rd += len;
963         } else {
964                 memcpy(buf, q_head + rd, q_sz - rd);
965                 memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd));
966                 rd = len - (q_sz - rd);
967         }
968
969         *_rd = rd;
970 }
971
972 #define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) +      \
973                              IEEE80211_TKIP_IV_LEN +                 \
974                              sizeof(rfc1042_header) + ETH_TLEN)
975
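/*
 * Walk the ME-to-host data queue: for each SAP_MSG_DATA_PACKET, rebuild the
 * ethernet frame in a newly allocated skb (keeping headroom for a WiFi
 * header, IV and SNAP) and queue it on @tx_skbs so that the caller can
 * transmit it on the netdev once the mutex is released.
 */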
976 static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev,
977                                     const u8 *q_head, u32 q_sz,
978                                     u32 rd, u32 wr, ssize_t valid_rx_sz,
979                                     struct sk_buff_head *tx_skbs)
980 {
981         struct iwl_sap_hdr hdr;
982         struct net_device *netdev =
983                 rcu_dereference_protected(iwl_mei_cache.netdev,
984                                           lockdep_is_held(&iwl_mei_mutex));
985
986         if (!netdev)
987                 return;
988
989         while (valid_rx_sz >= sizeof(hdr)) {
990                 struct ethhdr *ethhdr;
991                 unsigned char *data;
992                 struct sk_buff *skb;
993                 u16 len;
994
995                 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr));
996                 valid_rx_sz -= sizeof(hdr);
997                 len = le16_to_cpu(hdr.len);
998
999                 if (valid_rx_sz < len) {
1000                         dev_err(&cldev->dev,
1001                                 "Data queue is corrupted: valid data len %zd, len %d\n",
1002                                 valid_rx_sz, len);
1003                         break;
1004                 }
1005
1006                 if (len < sizeof(*ethhdr)) {
1007                         dev_err(&cldev->dev,
1008                                 "Data len is smaller than an ethernet header? len = %d\n",
1009                                 len);
1010                 }
1011
1012                 valid_rx_sz -= len;
1013
1014                 if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) {
1015                         dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n",
1016                                 le16_to_cpu(hdr.type), len);
1017                         continue;
1018                 }
1019
1020                 /* We need enough room for the WiFi header + SNAP + IV */
1021                 skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN);
                     if (!skb)
                             continue;
1022
1023                 skb_reserve(skb, QOS_HDR_IV_SNAP_LEN);
1024                 ethhdr = skb_push(skb, sizeof(*ethhdr));
1025
1026                 iwl_mei_read_from_q(q_head, q_sz, &rd, wr,
1027                                     ethhdr, sizeof(*ethhdr));
1028                 len -= sizeof(*ethhdr);
1029
1030                 skb_reset_mac_header(skb);
1031                 skb_reset_network_header(skb);
1032                 skb->protocol = ethhdr->h_proto;
1033
1034                 data = skb_put(skb, len);
1035                 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len);
1036
1037                 /*
1038                  * Enqueue the skb here so that it can be sent later, when we
1039                  * no longer hold the mutex. Tx'ing a packet with the mutex held
1040                  * is possible, but we would then have to forbid the Tx path
1041                  * from calling any of iwlmei's functions, since every API from
1042                  * iwlmei needs the mutex.
1043                  */
1044                 __skb_queue_tail(tx_skbs, skb);
1045         }
1046 }
1047
1048 static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev,
1049                                       const u8 *q_head, u32 q_sz,
1050                                       u32 rd, u32 wr, ssize_t valid_rx_sz)
1051 {
1052         struct page *p = alloc_page(GFP_KERNEL);
1053         struct iwl_sap_hdr *hdr;
1054
1055         if (!p)
1056                 return;
1057
1058         hdr = page_address(p);
1059
1060         while (valid_rx_sz >= sizeof(*hdr)) {
1061                 u16 len;
1062
1063                 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr));
1064                 valid_rx_sz -= sizeof(*hdr);
1065                 len = le16_to_cpu(hdr->len);
1066
1067                 if (valid_rx_sz < len)
1068                         break;
1069
1070                 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len);
1071
1072                 trace_iwlmei_sap_cmd(hdr, false);
1073                 iwl_mei_handle_sap_msg(cldev, hdr);
1074                 valid_rx_sz -= len;
1075         }
1076
1077         /* valid_rx_sz must be 0 now... */
1078         if (valid_rx_sz)
1079                 dev_err(&cldev->dev,
1080                         "More data in the buffer although we read it all\n");
1081
1082         __free_page(p);
1083 }
1084
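/*
 * Drain one ME-to-host queue: compute how much valid data sits between the
 * read and write pointers, hand it to the data path (when @skbs is given)
 * or to the notification path, then mark it all as consumed by setting the
 * read pointer to the write pointer.
 */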
1085 static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev,
1086                                   struct iwl_sap_q_ctrl_blk *notif_q,
1087                                   const u8 *q_head,
1088                                   struct sk_buff_head *skbs,
1089                                   u32 q_sz)
1090 {
1091         u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
1092         u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
1093         ssize_t valid_rx_sz;
1094
1095         if (rd > q_sz || wr > q_sz) {
1096                 dev_err(&cldev->dev,
1097                         "Pointers are past the buffer limit\n");
1098                 return;
1099         }
1100
1101         if (rd == wr)
1102                 return;
1103
1104         valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr;
1105
1106         if (skbs)
1107                 iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr,
1108                                         valid_rx_sz, skbs);
1109         else
1110                 iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr,
1111                                           valid_rx_sz);
1112
1113         /* Advance the read pointer up to the write pointer: all data was consumed */
1114         WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr));
1115 }
1116
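/*
 * CSME asked us to check the shared area: drain the ME-to-host notification
 * queue first (each SAP message handler takes the mutex itself), then drain
 * the data queue under the mutex and transmit the rebuilt frames outside of
 * it, under the RCU read lock.
 */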
1117 static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev)
1118 {
1119         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1120         struct iwl_sap_q_ctrl_blk *notif_q;
1121         struct sk_buff_head tx_skbs;
1122         struct iwl_sap_dir *dir;
1123         void *q_head;
1124         u32 q_sz;
1125
1126         if (!mei->shared_mem.ctrl)
1127                 return;
1128
1129         dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
1130         notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
1131         q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
1132         q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
1133
1134         /*
1135          * Do not hold the mutex here; rather, each and every message
1136          * handler takes it itself.
1137          * This lets each handler decide at which point it needs the mutex.
1138          */
1139         iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL, q_sz);
1140
1141         mutex_lock(&iwl_mei_mutex);
1142         dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
1143         notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
1144         q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
1145         q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
1146
1147         __skb_queue_head_init(&tx_skbs);
1148
1149         iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs, q_sz);
1150
1151         if (skb_queue_empty(&tx_skbs)) {
1152                 mutex_unlock(&iwl_mei_mutex);
1153                 return;
1154         }
1155
1156         /*
1157          * Take the RCU read lock before we unlock the mutex to make sure that
1158          * even if the netdev is replaced by another non-NULL netdev right after
1159          * we unlock the mutex, the old netdev will still be valid when we
1160          * transmit the frames. We can't allow the netdev to be replaced here
1161          * because the skbs hold a pointer to it.
1162          */
1163         rcu_read_lock();
1164
1165         mutex_unlock(&iwl_mei_mutex);
1166
1167         if (!rcu_access_pointer(iwl_mei_cache.netdev)) {
1168                 dev_err(&cldev->dev, "Can't Tx without a netdev\n");
1169                 skb_queue_purge(&tx_skbs);
1170                 goto out;
1171         }
1172
1173         while (!skb_queue_empty(&tx_skbs)) {
1174                 struct sk_buff *skb = __skb_dequeue(&tx_skbs);
1175
1176                 trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR);
1177                 dev_queue_xmit(skb);
1178         }
1179
1180 out:
1181         rcu_read_unlock();
1182 }
1183
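/* Rx callback for the MEI bus: read one ME message and dispatch it. */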
1184 static void iwl_mei_rx(struct mei_cl_device *cldev)
1185 {
1186         struct iwl_sap_me_msg_hdr *hdr;
1187         u8 msg[100];
1188         ssize_t ret;
1189
1190         ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg));
1191         if (ret < 0) {
1192                 dev_err(&cldev->dev, "failed to receive data: %zd\n", ret);
1193                 return;
1194         }
1195
1196         if (ret == 0) {
1197                 dev_err(&cldev->dev, "got an empty response\n");
1198                 return;
1199         }
1200
1201         hdr = (void *)msg;
1202         trace_iwlmei_me_msg(hdr, false);
1203
1204         switch (le32_to_cpu(hdr->type)) {
1205         case SAP_ME_MSG_START_OK:
1206                 BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) >
1207                              sizeof(msg));
1208
1209                 iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret);
1210                 break;
1211         case SAP_ME_MSG_CHECK_SHARED_AREA:
1212                 iwl_mei_handle_check_shared_area(cldev);
1213                 break;
1214         default:
1215                 dev_err(&cldev->dev, "got an unexpected Rx notification: %d\n",
1216                         le32_to_cpu(hdr->type));
1217                 break;
1218         }
1219 }
1220
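/*
 * Send SAP_ME_MSG_START to the CSME firmware, advertising the SAP version
 * we support and our initial sequence numbers. The firmware is expected to
 * answer with SAP_ME_MSG_START_OK.
 */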
1221 static int iwl_mei_send_start(struct mei_cl_device *cldev)
1222 {
1223         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1224         struct iwl_sap_me_msg_start msg = {
1225                 .hdr.type = cpu_to_le32(SAP_ME_MSG_START),
1226                 .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
1227                 .hdr.len = cpu_to_le32(sizeof(msg)),
1228                 .supported_versions[0] = SAP_VERSION,
1229                 .init_data_seq_num = cpu_to_le16(0x100),
1230                 .init_notif_seq_num = cpu_to_le16(0x800),
1231         };
1232         int ret;
1233
1234         trace_iwlmei_me_msg(&msg.hdr, true);
1235         ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
1236         if (ret != sizeof(msg)) {
1237                 dev_err(&cldev->dev,
1238                         "failed to send the SAP_ME_MSG_START message %d\n",
1239                         ret);
1240                 return ret;
1241         }
1242
1243         return 0;
1244 }
1245
1246 static int iwl_mei_enable(struct mei_cl_device *cldev)
1247 {
1248         int ret;
1249
1250         ret = mei_cldev_enable(cldev);
1251         if (ret < 0) {
1252                 dev_err(&cldev->dev, "failed to enable the device: %d\n", ret);
1253                 return ret;
1254         }
1255
1256         ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx);
1257         if (ret) {
1258                 dev_err(&cldev->dev,
1259                         "failed to register to the rx cb: %d\n", ret);
1260                 mei_cldev_disable(cldev);
1261                 return ret;
1262         }
1263
1264         return 0;
1265 }
1266
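/*
 * Ask CSME for the NVM data: send SAP_MSG_NOTIF_GET_NVM, wait up to two
 * seconds for the NVM notification, and return a copy the caller must
 * kfree(). Returns NULL on timeout or when we are not connected to CSME.
 */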
1267 struct iwl_mei_nvm *iwl_mei_get_nvm(void)
1268 {
1269         struct iwl_mei_nvm *nvm = NULL;
1270         struct iwl_mei *mei;
1271         int ret;
1272
1273         mutex_lock(&iwl_mei_mutex);
1274
1275         if (!iwl_mei_is_connected())
1276                 goto out;
1277
1278         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1279
1280         if (!mei)
1281                 goto out;
1282
1283         ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev,
1284                                    SAP_MSG_NOTIF_GET_NVM);
1285         if (ret)
1286                 goto out;
1287
1288         mutex_unlock(&iwl_mei_mutex);
1289
1290         ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ);
1291         if (!ret)
1292                 return NULL;
1293
1294         mutex_lock(&iwl_mei_mutex);
1295
1296         if (!iwl_mei_is_connected())
1297                 goto out;
1298
1299         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1300
1301         if (!mei)
1302                 goto out;
1303
1304         if (mei->nvm)
1305                 nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL);
1306
1307 out:
1308         mutex_unlock(&iwl_mei_mutex);
1309         return nvm;
1310 }
1311 EXPORT_SYMBOL_GPL(iwl_mei_get_nvm);
1312
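/*
 * Ask CSME for ownership of the WiFi device. Returns 0 when we already own
 * it or when there is nothing to ask (not connected, AMT disabled),
 * -ETIMEDOUT when CSME doesn't grant ownership within half a second, and
 * non-zero if ownership wasn't ours by the time we re-checked.
 */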
1313 int iwl_mei_get_ownership(void)
1314 {
1315         struct iwl_mei *mei;
1316         int ret;
1317
1318         mutex_lock(&iwl_mei_mutex);
1319
1320         /* In case we didn't have a bind */
1321         if (!iwl_mei_is_connected()) {
1322                 ret = 0;
1323                 goto out;
1324         }
1325
1326         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1327
1328         if (!mei) {
1329                 ret = -ENODEV;
1330                 goto out;
1331         }
1332
1333         if (!mei->amt_enabled) {
1334                 ret = 0;
1335                 goto out;
1336         }
1337
1338         if (mei->got_ownership) {
1339                 ret = 0;
1340                 goto out;
1341         }
1342
1343         ret = iwl_mei_send_sap_msg(mei->cldev,
1344                                    SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
1345         if (ret)
1346                 goto out;
1347
1348         mutex_unlock(&iwl_mei_mutex);
1349
1350         ret = wait_event_timeout(mei->get_ownership_wq,
1351                                  mei->got_ownership, HZ / 2);
1352         if (!ret)
1353                 return -ETIMEDOUT;
1354
1355         mutex_lock(&iwl_mei_mutex);
1356
1357         /* In case we didn't have a bind */
1358         if (!iwl_mei_is_connected()) {
1359                 ret = 0;
1360                 goto out;
1361         }
1362
1363         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1364
1365         if (!mei) {
1366                 ret = -ENODEV;
1367                 goto out;
1368         }
1369
1370         ret = !mei->got_ownership;
1371
1372 out:
1373         mutex_unlock(&iwl_mei_mutex);
1374         return ret;
1375 }
1376 EXPORT_SYMBOL_GPL(iwl_mei_get_ownership);
1377
1378 void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
1379                              const struct iwl_mei_colloc_info *colloc_info)
1380 {
1381         struct iwl_sap_notif_host_link_up msg = {
1382                 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
1383                 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1384                 .conn_info = {
1385                         .ssid_len = cpu_to_le32(conn_info->ssid_len),
1386                         .channel = conn_info->channel,
1387                         .band = conn_info->band,
1388                         .pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher),
1389                         .auth_mode = cpu_to_le32(conn_info->auth_mode),
1390                 },
1391         };
1392         struct iwl_mei *mei;
1393
1394         if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid))
1395                 return;
1396
1397         memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len);
1398         memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN);
1399
1400         if (colloc_info) {
1401                 msg.colloc_channel = colloc_info->channel;
1402                 msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1;
1403                 memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN);
1404         }
1405
1406         mutex_lock(&iwl_mei_mutex);
1407
1408         if (!iwl_mei_is_connected())
1409                 goto out;
1410
1411         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1412
1413         if (!mei)
1414                 goto out;
1415
1416         if (!mei->amt_enabled)
1417                 goto out;
1418
1419         iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1420
1421 out:
1422         kfree(iwl_mei_cache.conn_info);
1423         iwl_mei_cache.conn_info =
1424                 kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL);
1425         mutex_unlock(&iwl_mei_mutex);
1426 }
1427 EXPORT_SYMBOL_GPL(iwl_mei_host_associated);
1428
1429 void iwl_mei_host_disassociated(void)
1430 {
1431         struct iwl_mei *mei;
1432         struct iwl_sap_notif_host_link_down msg = {
1433                 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN),
1434                 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1435                 .type = HOST_LINK_DOWN_TYPE_LONG,
1436         };
1437
1438         mutex_lock(&iwl_mei_mutex);
1439
1440         if (!iwl_mei_is_connected())
1441                 goto out;
1442
1443         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1444
1445         if (!mei)
1446                 goto out;
1447
1448         iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1449
1450 out:
1451         kfree(iwl_mei_cache.conn_info);
1452         iwl_mei_cache.conn_info = NULL;
1453         mutex_unlock(&iwl_mei_mutex);
1454 }
1455 EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated);
1456
1457 void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
1458 {
1459         struct iwl_mei *mei;
1460         u32 rfkill_state = 0;
1461         struct iwl_sap_msg_dw msg = {
1462                 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
1463                 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1464         };
1465
1466         if (!sw_rfkill)
1467                 rfkill_state |= SAP_SW_RFKILL_DEASSERTED;
1468
1469         if (!hw_rfkill)
1470                 rfkill_state |= SAP_HW_RFKILL_DEASSERTED;
1471
1472         mutex_lock(&iwl_mei_mutex);
1473
1474         if (!iwl_mei_is_connected())
1475                 goto out;
1476
1477         msg.val = cpu_to_le32(rfkill_state);
1478
1479         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1480
1481         if (!mei)
1482                 goto out;
1483
1484         iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1485
1486 out:
1487         iwl_mei_cache.rf_kill = rfkill_state;
1488         mutex_unlock(&iwl_mei_mutex);
1489 }
1490 EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state);
1491
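/*
 * Pass the currently used MAC address and the MAC address from the NVM to
 * CSME (SAP_MSG_NOTIF_NIC_INFO); both are also cached for later use.
 */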
1492 void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
1493 {
1494         struct iwl_mei *mei;
1495         struct iwl_sap_notif_host_nic_info msg = {
1496                 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
1497                 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1498         };
1499
1500         mutex_lock(&iwl_mei_mutex);
1501
1502         if (!iwl_mei_is_connected())
1503                 goto out;
1504
1505         ether_addr_copy(msg.mac_address, mac_address);
1506         ether_addr_copy(msg.nvm_address, nvm_address);
1507
1508         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1509
1510         if (!mei)
1511                 goto out;
1512
1513         iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1514
1515 out:
1516         ether_addr_copy(iwl_mei_cache.mac_address, mac_address);
1517         ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address);
1518         mutex_unlock(&iwl_mei_mutex);
1519 }
1520 EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info);
1521
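/*
 * Report the current country code (MCC) to CSME with SAP_MSG_NOTIF_COUNTRY_CODE
 * and cache it in iwl_mei_cache.mcc.
 */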
1522 void iwl_mei_set_country_code(u16 mcc)
1523 {
1524         struct iwl_mei *mei;
1525         struct iwl_sap_notif_country_code msg = {
1526                 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
1527                 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1528                 .mcc = cpu_to_le16(mcc),
1529         };
1530
1531         mutex_lock(&iwl_mei_mutex);
1532
1533         if (!iwl_mei_is_connected())
1534                 goto out;
1535
1536         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1537
1538         if (!mei)
1539                 goto out;
1540
1541         iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1542
1543 out:
1544         iwl_mei_cache.mcc = mcc;
1545         mutex_unlock(&iwl_mei_mutex);
1546 }
1547 EXPORT_SYMBOL_GPL(iwl_mei_set_country_code);
1548
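/*
 * Send the SAR power limit table to CSME (SAP_MSG_NOTIF_SAR_LIMITS). A copy
 * of the table is kept in iwl_mei_cache.power_limit.
 */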
1549 void iwl_mei_set_power_limit(const __le16 *power_limit)
1550 {
1551         struct iwl_mei *mei;
1552         struct iwl_sap_notif_sar_limits msg = {
1553                 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
1554                 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1555         };
1556
1557         mutex_lock(&iwl_mei_mutex);
1558
1559         if (!iwl_mei_is_connected())
1560                 goto out;
1561
1562         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1563
1564         if (!mei)
1565                 goto out;
1566
1567         memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table));
1568
1569         iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1570
1571 out:
1572         kfree(iwl_mei_cache.power_limit);
1573         iwl_mei_cache.power_limit = kmemdup(power_limit,
1574                                             sizeof(msg.sar_chain_info_table), GFP_KERNEL);
1575         mutex_unlock(&iwl_mei_mutex);
1576 }
1577 EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit);
1578
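/*
 * Attach or detach the net_device used for the CSME data path. A non-NULL
 * netdev gets our Rx handler registered on it when AMT is enabled; passing
 * NULL unregisters the handler from the previously cached netdev. The pointer
 * is cached under RCU in iwl_mei_cache.netdev.
 */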
1579 void iwl_mei_set_netdev(struct net_device *netdev)
1580 {
1581         struct iwl_mei *mei;
1582
1583         mutex_lock(&iwl_mei_mutex);
1584
1585         if (!iwl_mei_is_connected()) {
1586                 rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1587                 goto out;
1588         }
1589
1590         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1591
1592         if (!mei)
1593                 goto out;
1594
1595         if (!netdev) {
1596                 struct net_device *dev =
1597                         rcu_dereference_protected(iwl_mei_cache.netdev,
1598                                                   lockdep_is_held(&iwl_mei_mutex));
1599
1600                 if (!dev)
1601                         goto out;
1602
1603                 netdev_rx_handler_unregister(dev);
1604         }
1605
1606         rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1607
1608         if (netdev && mei->amt_enabled)
1609                 netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
1610
1611 out:
1612         mutex_unlock(&iwl_mei_mutex);
1613 }
1614 EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);
1615
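/*
 * Expected to be called by the wifi driver when the device goes down. If CSME
 * previously asked to take ownership, confirm it now with
 * SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED and clear the pending flag.
 */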
1616 void iwl_mei_device_down(void)
1617 {
1618         struct iwl_mei *mei;
1619
1620         mutex_lock(&iwl_mei_mutex);
1621
1622         if (!iwl_mei_is_connected())
1623                 goto out;
1624
1625         mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1626
1627         if (!mei)
1628                 goto out;
1629
1630         if (!mei->csme_taking_ownership)
1631                 goto out;
1632
1633         iwl_mei_send_sap_msg(mei->cldev,
1634                              SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
1635         mei->csme_taking_ownership = false;
1636 out:
1637         mutex_unlock(&iwl_mei_mutex);
1638 }
1639 EXPORT_SYMBOL_GPL(iwl_mei_device_down);
1640
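/*
 * Register the wifi driver with iwlmei. Only one registration is allowed at a
 * time: priv and ops double as the "already registered" markers. If a SAP
 * connection already exists, CSME is told that the wifi driver is up and the
 * current link protection state is pushed back through ops->rfkill().
 *
 * A rough sketch of the expected call order on the wifi driver side, inferred
 * from the APIs exported in this file (not copied from iwlwifi itself):
 *
 *	err = iwl_mei_register(priv, &ops);
 *	iwl_mei_set_netdev(netdev);
 *	iwl_mei_set_nic_info(mac_addr, nvm_addr);
 *	...
 *	iwl_mei_set_netdev(NULL);
 *	iwl_mei_start_unregister();
 *	...
 *	iwl_mei_unregister_complete();
 */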
1641 int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
1642 {
1643         int ret;
1644
1645         /*
1646          * We must have a non-NULL priv pointer so that we don't crash when
1647          * there are multiple WiFi devices.
1648          */
1649         if (!priv)
1650                 return -EINVAL;
1651
1652         mutex_lock(&iwl_mei_mutex);
1653
1654         /* do not allow registration if someone else already registered */
1655         if (iwl_mei_cache.priv || iwl_mei_cache.ops) {
1656                 ret = -EBUSY;
1657                 goto out;
1658         }
1659
1660         iwl_mei_cache.priv = priv;
1661         iwl_mei_cache.ops = ops;
1662
1663         if (iwl_mei_global_cldev) {
1664                 struct iwl_mei *mei =
1665                         mei_cldev_get_drvdata(iwl_mei_global_cldev);
1666
1667                 /* we already have a SAP connection */
1668                 if (iwl_mei_is_connected()) {
1669                         iwl_mei_send_sap_msg(mei->cldev,
1670                                              SAP_MSG_NOTIF_WIFIDR_UP);
1671                         ops->rfkill(priv, mei->link_prot_state);
1672                 }
1673         }
1674         ret = 0;
1675
1676 out:
1677         mutex_unlock(&iwl_mei_mutex);
1678         return ret;
1679 }
1680 EXPORT_SYMBOL_GPL(iwl_mei_register);
1681
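/*
 * First half of the unregistration: drop the cached connection info and power
 * limits and clear the ops pointer, but deliberately keep iwl_mei_cache.priv
 * set so that no new registration can happen before
 * iwl_mei_unregister_complete() runs.
 */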
1682 void iwl_mei_start_unregister(void)
1683 {
1684         mutex_lock(&iwl_mei_mutex);
1685
1686         /* At this point, the wifi driver should have removed the netdev */
1687         if (rcu_access_pointer(iwl_mei_cache.netdev))
1688                 pr_err("Still had a netdev pointer set upon unregister\n");
1689
1690         kfree(iwl_mei_cache.conn_info);
1691         iwl_mei_cache.conn_info = NULL;
1692         kfree(iwl_mei_cache.power_limit);
1693         iwl_mei_cache.power_limit = NULL;
1694         iwl_mei_cache.ops = NULL;
1695         /* leave iwl_mei_cache.priv non-NULL to prevent any new registration */
1696
1697         mutex_unlock(&iwl_mei_mutex);
1698 }
1699 EXPORT_SYMBOL_GPL(iwl_mei_start_unregister);
1700
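/*
 * Second half of the unregistration: clear the priv pointer and, if the MEI
 * client device is still around, tell CSME that the wifi driver is gone
 * (SAP_MSG_NOTIF_WIFIDR_DOWN) and forget any ownership we had.
 */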
1701 void iwl_mei_unregister_complete(void)
1702 {
1703         mutex_lock(&iwl_mei_mutex);
1704
1705         iwl_mei_cache.priv = NULL;
1706
1707         if (iwl_mei_global_cldev) {
1708                 struct iwl_mei *mei =
1709                         mei_cldev_get_drvdata(iwl_mei_global_cldev);
1710
1711                 iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN);
1712                 mei->got_ownership = false;
1713         }
1714
1715         mutex_unlock(&iwl_mei_mutex);
1716 }
1717 EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete);
1718
1719 #if IS_ENABLED(CONFIG_DEBUG_FS)
1720
1721 static ssize_t
1722 iwl_mei_dbgfs_send_start_message_write(struct file *file,
1723                                        const char __user *user_buf,
1724                                        size_t count, loff_t *ppos)
1725 {
1726         int ret;
1727
1728         mutex_lock(&iwl_mei_mutex);
1729
1730         if (!iwl_mei_global_cldev) {
1731                 ret = -ENODEV;
1732                 goto out;
1733         }
1734
1735         ret = iwl_mei_send_start(iwl_mei_global_cldev);
1736
1737 out:
1738         mutex_unlock(&iwl_mei_mutex);
1739         return ret ?: count;
1740 }
1741
1742 static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = {
1743         .write = iwl_mei_dbgfs_send_start_message_write,
1744         .open = simple_open,
1745         .llseek = default_llseek,
1746 };
1747
1748 static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file,
1749                                                  const char __user *user_buf,
1750                                                  size_t count, loff_t *ppos)
1751 {
1752         iwl_mei_get_ownership();
1753
1754         return count;
1755 }
1756
1757 static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = {
1758         .write = iwl_mei_dbgfs_req_ownership_write,
1759         .open = simple_open,
1760         .llseek = default_llseek,
1761 };
1762
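/*
 * Debugfs entries, created under a directory named after the module:
 * "status" exposes the iwl_mei_status bits, "send_start_message" re-sends the
 * SAP start handshake and "req_ownership" requests device ownership from CSME
 * through iwl_mei_get_ownership().
 */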
1763 static void iwl_mei_dbgfs_register(struct iwl_mei *mei)
1764 {
1765         mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1766
1767         if (!mei->dbgfs_dir)
1768                 return;
1769
1770         debugfs_create_ulong("status", S_IRUSR,
1771                              mei->dbgfs_dir, &iwl_mei_status);
1772         debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir,
1773                             mei, &iwl_mei_dbgfs_send_start_message_ops);
1774         debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir,
1775                             mei, &iwl_mei_dbgfs_req_ownership_ops);
1776 }
1777
1778 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei)
1779 {
1780         debugfs_remove_recursive(mei->dbgfs_dir);
1781         mei->dbgfs_dir = NULL;
1782 }
1783
1784 #else
1785
1786 static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {}
1787 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
1788
1789 #endif /* CONFIG_DEBUG_FS */
1790
1791 #define ALLOC_SHARED_MEM_RETRY_MAX_NUM  3
1792
1793 /*
1794  * iwl_mei_probe - the probe function called by the mei bus enumeration
1795  *
1796  * This allocates the data needed by iwlmei and stores a pointer to it in
1797  * the mei_cl_device's drvdata.
1798  * It starts the SAP protocol by sending the SAP_ME_MSG_START without
1799  * waiting for the answer. The answer will be caught later by the Rx callback.
1800  */
1801 static int iwl_mei_probe(struct mei_cl_device *cldev,
1802                          const struct mei_cl_device_id *id)
1803 {
1804         int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
1805         struct iwl_mei *mei;
1806         int ret;
1807
1808         mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL);
1809         if (!mei)
1810                 return -ENOMEM;
1811
1812         init_waitqueue_head(&mei->get_nvm_wq);
1813         INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk);
1814         INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
1815                           iwl_mei_csa_throttle_end_wk);
1816         init_waitqueue_head(&mei->get_ownership_wq);
1817         spin_lock_init(&mei->data_q_lock);
1818
1819         mei_cldev_set_drvdata(cldev, mei);
1820         mei->cldev = cldev;
1821
1822         do {
1823                 ret = iwl_mei_alloc_shared_mem(cldev);
1824                 if (!ret)
1825                         break;
1826                 /*
1827                  * The CSME firmware needs to boot the internal WLAN client.
1828                  * This can take time in certain configurations (usually
1829                  * upon resume and when the whole CSME firmware is shut down
1830                  * during suspend).
1831                  *
1832                  * Wait a bit before retrying and hope we'll succeed next time.
1833                  */
1834
1835                 dev_dbg(&cldev->dev,
1836                         "Couldn't allocate the shared memory: %d, attempt %d / %d\n",
1837                         ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
1838                 msleep(100);
1839                 alloc_retry--;
1840         } while (alloc_retry);
1841
1842         if (ret) {
1843                 dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
1844                         ret);
1845                 goto free;
1846         }
1847
1848         iwl_mei_init_shared_mem(mei);
1849
1850         ret = iwl_mei_enable(cldev);
1851         if (ret)
1852                 goto free_shared_mem;
1853
1854         iwl_mei_dbgfs_register(mei);
1855
1856         /*
1857          * We now have an Rx function in place; start the SAP protocol.
1858          * We expect to get the SAP_ME_MSG_START_OK response later on.
1859          */
1860         mutex_lock(&iwl_mei_mutex);
1861         ret = iwl_mei_send_start(cldev);
1862         mutex_unlock(&iwl_mei_mutex);
1863         if (ret)
1864                 goto debugfs_unregister;
1865
1866         /* must be last */
1867         iwl_mei_global_cldev = cldev;
1868
1869         return 0;
1870
1871 debugfs_unregister:
1872         iwl_mei_dbgfs_unregister(mei);
1873         mei_cldev_disable(cldev);
1874 free_shared_mem:
1875         iwl_mei_free_shared_mem(cldev);
1876 free:
1877         mei_cldev_set_drvdata(cldev, NULL);
1878         devm_kfree(&cldev->dev, mei);
1879
1880         return ret;
1881 }
1882
1883 #define SEND_SAP_MAX_WAIT_ITERATION 10
1884
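/*
 * Called when the MEI client device goes away (typically on suspend or
 * shutdown). Tear down in order: tell the wifi driver (if registered) that
 * the NIC is stolen, unregister the Rx handler, send HOST_GOES_DOWN to CSME
 * and wait a bounded time for it to be consumed, then disable the MEI client
 * and free all resources.
 */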
1885 static void iwl_mei_remove(struct mei_cl_device *cldev)
1886 {
1887         struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1888         int i;
1889
1890         /*
1891          * We are being removed while the bus is active; it means we are
1892          * going to suspend or shut down, so the NIC will disappear.
1893          */
1894         if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops)
1895                 iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);
1896
1897         if (rcu_access_pointer(iwl_mei_cache.netdev)) {
1898                 struct net_device *dev;
1899
1900                 /*
1901                  * First take rtnl and only then the mutex to avoid an ABBA
1902                  * deadlock with iwl_mei_set_netdev().
1903                  */
1904                 rtnl_lock();
1905                 mutex_lock(&iwl_mei_mutex);
1906
1907                 /*
1908                  * If we are suspending and the wifi driver hasn't removed its netdev
1909                  * yet, do it now. In any case, don't change the cache.netdev pointer.
1910                  */
1911                 dev = rcu_dereference_protected(iwl_mei_cache.netdev,
1912                                                 lockdep_is_held(&iwl_mei_mutex));
1913
1914                 netdev_rx_handler_unregister(dev);
1915                 mutex_unlock(&iwl_mei_mutex);
1916                 rtnl_unlock();
1917         }
1918
1919         mutex_lock(&iwl_mei_mutex);
1920
1921         /*
1922          * Tell CSME that we are going down so that it won't access the
1923          * memory anymore; make sure this message goes through immediately.
1924          */
1925         mei->csa_throttled = false;
1926         iwl_mei_send_sap_msg(mei->cldev,
1927                              SAP_MSG_NOTIF_HOST_GOES_DOWN);
1928
1929         for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
1930                 if (!iwl_mei_host_to_me_data_pending(mei))
1931                         break;
1932
1933                 msleep(5);
1934         }
1935
1936         /*
1937          * If we couldn't make sure that CSME saw the HOST_GOES_DOWN message,
1938          * it means that it will probably keep reading memory that we are going
1939          * to unmap and free; expect IOMMU error messages.
1940          */
1941         if (i == SEND_SAP_MAX_WAIT_ITERATION)
1942                 dev_err(&mei->cldev->dev,
1943                         "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
1944
1945         mutex_unlock(&iwl_mei_mutex);
1946
1947         /*
1948          * This looks strange, but this lock is taken here to make sure that
1949          * iwl_mei_add_data_to_ring(), called from the Tx path, sees that we
1950          * clear the IWL_MEI_STATUS_SAP_CONNECTED bit.
1951          * Rx isn't a problem because the rx_handler can't be called after
1952          * having been unregistered.
1953          */
1954         spin_lock_bh(&mei->data_q_lock);
1955         clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
1956         spin_unlock_bh(&mei->data_q_lock);
1957
1958         if (iwl_mei_cache.ops)
1959                 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
1960
1961         /*
1962          * mei_cldev_disable will return only after all the MEI Rx is done.
1963          * It must be called when iwl_mei_mutex is *not* held, since it waits
1964          * for our Rx handler to complete.
1965          * After it returns, no new Rx will start.
1966          */
1967         mei_cldev_disable(cldev);
1968
1969         /*
1970          * The netdev was already removed, and its removal includes a call to
1971          * synchronize_net(), so we know there won't be any new Rx that will
1972          * trigger the following workers.
1973          */
1974         cancel_work_sync(&mei->send_csa_msg_wk);
1975         cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
1976
1977         /*
1978          * If someone is waiting for the ownership, let them know that we are
1979          * going down and that we are not connected anymore. They'll then be
1980          * able to take the device.
1981          */
1982         wake_up_all(&mei->get_ownership_wq);
1983
1984         mutex_lock(&iwl_mei_mutex);
1985
1986         iwl_mei_global_cldev = NULL;
1987
1988         wake_up_all(&mei->get_nvm_wq);
1989
1990         iwl_mei_free_shared_mem(cldev);
1991
1992         iwl_mei_dbgfs_unregister(mei);
1993
1994         mei_cldev_set_drvdata(cldev, NULL);
1995
1996         kfree(mei->nvm);
1997
1998         kfree(rcu_access_pointer(mei->filters));
1999
2000         devm_kfree(&cldev->dev, mei);
2001
2002         mutex_unlock(&iwl_mei_mutex);
2003 }
2004
2005 static const struct mei_cl_device_id iwl_mei_tbl[] = {
2006         { KBUILD_MODNAME, MEI_WLAN_UUID, MEI_CL_VERSION_ANY},
2007
2008         /* required last entry */
2009         { }
2010 };
2011
2012 /*
2013  * Do not export the device table: this module is loaded as a dependency
2014  * of iwlwifi rather than auto-loaded when the MEI bus device appears.
2015  */
2016
2017 static struct mei_cl_driver iwl_mei_cl_driver = {
2018         .id_table = iwl_mei_tbl,
2019         .name = KBUILD_MODNAME,
2020         .probe = iwl_mei_probe,
2021         .remove = iwl_mei_remove,
2022 };
2023
2024 module_mei_cl_driver(iwl_mei_cl_driver);