/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		128
struct mt76_dev;
struct mt76_wcid;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};
#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	MT_TXQ_FWDL,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};
struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	struct sk_buff *skb;
	int nbuf;
	u32 info;
};
struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
	};
	bool schedule;
	bool done;
};
struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);
struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
};
struct mt76_sw_queue {
	struct mt76_queue *q;

	struct list_head swq;
	int swq_queued;
};
struct mt76_mcu_ops {
	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};
enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

#define MT76_N_WCIDS 128
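/*
 * DECLARE_EWMA() from <linux/average.h> generates struct ewma_signal
 * plus the ewma_signal_init/add/read helpers; mt76 uses it as a running
 * average of per-station signal strength (2^10 fixed-point precision,
 * weight reciprocal 8).
 */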
DECLARE_EWMA(signal, 10, 8);
#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)
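/*
 * Illustrative sketch (not code from this file): wcid->tx_info is
 * composed with the masks above via FIELD_PREP() from
 * <linux/bitfield.h>, e.g. for a hypothetical rate/nss pair:
 *
 *	wcid->tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rate) |
 *			FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
 *			MT_WCID_TX_INFO_SET;
 */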
struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	u8 idx;
	u8 hw_key_idx;

	u8 sta:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];

	u32 tx_info;
	bool sw_iv;

	u8 packet_id;
};
struct mt76_txq {
	struct mt76_sw_queue *swq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};
struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	struct sk_buff *skb;
};
struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u16 size;
	u16 nframes;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};
#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2
#define MT_PACKET_ID_HAS_RATE		BIT(7)
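/*
 * The low 7 bits of a packet id carry a per-wcid sequence number used
 * to match tx status reports against queued skbs; ids 0 and 1 are
 * reserved, so skb-backed ids start at MT_PACKET_ID_FIRST, and ids with
 * MT_PACKET_ID_HAS_RATE set carry rate information rather than
 * referencing an skb (see mt76_is_skb_pktid() below).
 */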
#define MT_TX_STATUS_SKB_TIMEOUT	HZ
struct mt76_tx_cb {
	unsigned long jiffies;
	u8 wcid;
	u8 pktid;
	u8 flags;
};
enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
	MT76_REMOVED,
	MT76_READING_STATS,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};
#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
struct mt76_driver_ops {
	u32 drv_flags;
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};
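/*
 * Illustrative sketch (hypothetical "foo" driver; these names are not
 * declared by this header): a chip driver fills a static ops table and
 * hands it to mt76_alloc_device() at probe time:
 *
 *	static const struct mt76_driver_ops foo_drv_ops = {
 *		.txwi_size = FOO_TXWI_SIZE,
 *		.tx_prepare_skb = foo_tx_prepare_skb,
 *		.tx_complete_skb = foo_tx_complete_skb,
 *		.rx_skb = foo_queue_rx_skb,
 *	};
 *
 *	mdev = mt76_alloc_device(&pdev->dev, sizeof(struct foo_dev),
 *				 &foo_mac80211_ops, &foo_drv_ops);
 */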
struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;
};
struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};
struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};
/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))

enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
};
enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};
#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	1
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	union {
		u8 data[32];
		__le32 reg_val;
	};

	struct tasklet_struct rx_tasklet;
	struct workqueue_struct *stat_wq;
	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	bool sg_en;

	struct mt76u_mcu {
		struct mutex mutex;
		u8 *data;
		u32 msg_seq;

		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
		bool burst;
	} mcu;
};
struct mt76_mmio {
	struct mt76e_mcu {
		struct mutex mutex;

		wait_queue_head_t wait;
		struct sk_buff_head res_q;

		u32 msg_seq;
	} mcu;
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;
};
struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;

	u32 ampdu_ref;

	u8 iv[6];

	u8 aggr:1;
	u8 tid;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};
struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	struct mt76_channel_state *chan_state;
	spinlock_t lock;

	struct mt76_rx_status rx_ampdu_status;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;

	struct net_device napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	struct tasklet_struct tx_tasklet;
	struct napi_struct tx_napi;
	struct delayed_work mac_work;

	wait_queue_head_t tx_wait;
	struct sk_buff_head status_list;

	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

	u8 macaddr[ETH_ALEN];
	u32 rev;
	unsigned long state;

	struct tasklet_struct pre_tbtt_tasklet;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct mt76_rate_power rate_power;

	enum nl80211_dfs_regions region;

	struct led_classdev led_cdev;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
	};
};
#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)
#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
#define __mt76_mcu_send_msg(dev, ...)	(dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))
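/*
 * Illustrative sketch (hypothetical command id and request layout): the
 * non-underscore variants expect a driver device structure that embeds
 * struct mt76_dev as a member named "mt76":
 *
 *	struct foo_mcu_req req = { .mode = cpu_to_le32(1) };
 *	int err;
 *
 *	err = mt76_mcu_send_msg(dev, FOO_CMD_FUN_SET, &req, sizeof(req),
 *				true);
 */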
#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
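/*
 * Illustrative register access (MT_FOO_CFG and its field mask are
 * hypothetical): set a bit, then rewrite one field of a register with
 * read-modify-write:
 *
 *	mt76_set(dev, MT_FOO_CFG, BIT(0));
 *	mt76_rmw_field(dev, MT_FOO_CFG, MT_FOO_CFG_MODE, 0x3);
 */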
#define mt76_hw(dev) (dev)->mt76.hw
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
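/*
 * Illustrative poll (hypothetical register/bit): wait up to 100 ms for
 * firmware to signal readiness, bailing out on timeout:
 *
 *	if (!mt76_poll_msec(dev, MT_FOO_FW_STATE, BIT(1), BIT(1), 100))
 *		return -ETIMEDOUT;
 */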
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
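/*
 * Illustrative sketch of the usual tx housekeeping pattern built on the
 * wrappers above (as done by driver tx tasklets; not code from this
 * file):
 *
 *	int i;
 *
 *	for (i = 0; i < __MT_TXQ_MAX; i++)
 *		mt76_queue_tx_cleanup(dev, i, false);
 *	mt76_txq_schedule_all(&dev->mt76);
 */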
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
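/*
 * Each tx buffer is allocated with the hardware descriptor (txwi)
 * immediately in front of the struct mt76_txwi_cache bookkeeping area,
 * so the txwi pointer is recovered by stepping back drv->txwi_size
 * bytes from the cache entry.
 */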
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}
/* increment with wrap-around; size must be a power of 2 */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around; size must be a power of 2 */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}
u8 mt76_ac_to_hwq(u8 ac);
static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}
static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}
static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}
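/*
 * MediaTek hardware expects the 802.11 header to end on a 4-byte
 * boundary; when it does not, two zero padding bytes are inserted
 * between header and payload. The helper below performs that fixup in
 * place on the skb.
 */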
static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}
static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_tx_tasklet(unsigned long data);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_dev *dev);
void mt76_set_channel(struct mt76_dev *dev);
void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);
void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
	__acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
	__releases(&dev->status_list.lock);
int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
			  bool flush);
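/*
 * Illustrative tx-status flow (a sketch of how drivers consume the
 * helpers above when parsing a status event for "pktid"):
 *
 *	struct sk_buff_head list;
 *	struct sk_buff *skb;
 *
 *	mt76_tx_status_lock(dev, &list);
 *	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
 *	if (skb)
 *		mt76_tx_status_skb_done(dev, skb, &list);
 *	mt76_tx_status_unlock(dev, &list);
 */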
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
int mt76_get_min_avg_rssi(struct mt76_dev *dev);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
			 int len);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
			 int len);
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}
/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}
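/*
 * Direction of the bulk transfer below is inferred from actual_len:
 * reads (actual_len != NULL) use the command-response IN endpoint,
 * writes use the in-band command OUT endpoint.
 */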
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
		   int data_len, int tail_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

#endif /* __MT76_H */