/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"
#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		128
struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;

struct mt76_reg_pair {
	u32 reg;
	u32 val;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};
#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
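
/*
 * Illustrative sketch (not part of this header): the bus ops hide whether
 * the device is MMIO- or USB-attached, so common code can do register I/O
 * without knowing the transport. The offset below is made up:
 *
 *	u32 val = dev->bus->rr(dev, 0x1000);
 *	dev->bus->wr(dev, 0x1000, val | BIT(0));
 *
 * Drivers normally go through the mt76_rr()/mt76_wr() wrappers defined
 * further down instead of calling the ops directly.
 */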
enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	MT_TXQ_FWDL,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};
struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
	};
};
struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
};
struct mt76_sw_queue {
	struct mt76_queue *q;

	struct list_head swq;
	int swq_queued;
};
struct mt76_mcu_ops {
	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};
enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

#define MT76_N_WCIDS 128
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY		BIT(3)

DECLARE_EWMA(signal, 10, 8);
#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)
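
/*
 * Illustrative sketch: a driver pins a fixed rate for a station by packing
 * these fields into the wcid tx_info word (the field values are made up,
 * and the tx_info member itself is elided from this excerpt):
 *
 *	wcid->tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
 *			FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
 *			MT_WCID_TX_INFO_SET;
 */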
struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	struct ewma_signal rssi;

	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
};
struct mt76_txq {
	struct mt76_sw_queue *swq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;
};
struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	struct sk_buff *skb;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	struct delayed_work reorder_work;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};
#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2
#define MT_PACKET_ID_HAS_RATE		BIT(7)

#define MT_TX_STATUS_SKB_TIMEOUT	HZ
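
/*
 * Note: IDs in the range MT_PACKET_ID_FIRST..MT_PACKET_ID_MASK tag skbs
 * parked on dev->status_list until a TX status report arrives (or
 * MT_TX_STATUS_SKB_TIMEOUT expires); MT_PACKET_ID_NO_ACK and
 * MT_PACKET_ID_NO_SKB mark frames that need no such report.
 */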
struct mt76_tx_cb {
	unsigned long jiffies;
	u16 wcid;
	u8 pktid;
	u8 flags;
};
enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
};
#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
struct mt76_driver_ops {
	u32 drv_flags;
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};
struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};
struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
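
/*
 * Illustrative sketch: encode EEPROM offset 0x40 for a vendor request;
 * the USB control code masks the MT_VEND_TYPE_* bits back out to pick
 * the actual request type:
 *
 *	u32 addr = MT_VEND_ADDR(EEPROM, 0x40);
 *	// == MT_VEND_TYPE_EEPROM | 0x40
 */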
enum mt_vendor_req {
	MT_VEND_DEV_MODE = 0x1,
	MT_VEND_WRITE = 0x2,
	MT_VEND_MULTI_WRITE = 0x6,
	MT_VEND_MULTI_READ = 0x7,
	MT_VEND_READ_EEPROM = 0x9,
	MT_VEND_WRITE_FCE = 0x42,
	MT_VEND_WRITE_CFG = 0x46,
	MT_VEND_READ_CFG = 0x47,
};
enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};
#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	1
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;

	struct tasklet_struct rx_tasklet;
	struct workqueue_struct *stat_wq;
	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];

	struct mt76u_mcu {
		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
		bool burst;
	} mcu;
};
struct mt76_mmio {
	struct {
		struct mutex mutex;

		wait_queue_head_t wait;
		struct sk_buff_head res_q;

		u32 msg_seq;
	} mcu;
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;
};
struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;

	s8 chain_signal[IEEE80211_MAX_CHAINS];
};
struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;

	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	struct mt76_channel_state *chan_state;
	ktime_t survey_time;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
};
struct mt76_dev {
	struct mt76_phy phy; /* must be first */

	struct mt76_phy *phy2;

	struct ieee80211_hw *hw;

	struct mt76_rx_status rx_ampdu_status;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;

	struct net_device napi_dev;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	struct tasklet_struct tx_tasklet;
	struct napi_struct tx_napi;
	struct delayed_work mac_work;

	wait_queue_head_t tx_wait;
	struct sk_buff_head status_list;

	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
	unsigned long wcid_phy_mask[MT76_N_WCIDS / BITS_PER_LONG];

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

	u8 macaddr[ETH_ALEN];
	u32 rev;

	struct tasklet_struct pre_tbtt_tasklet;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct mt76_rate_power rate_power;

	enum nl80211_dfs_regions region;

	struct led_classdev led_cdev;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
	};
};
#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
#define __mt76_mcu_send_msg(dev, ...)	(dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)
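
/*
 * Illustrative sketch: mt76_set()/mt76_clear() are read-modify-write
 * shorthands; MT_FOO is a made-up register name:
 *
 *	mt76_set(dev, MT_FOO, BIT(0));		// rmw(dev, MT_FOO, 0, BIT(0))
 *	mt76_clear(dev, MT_FOO, GENMASK(7, 4));	// rmw(dev, MT_FOO, mask, 0)
 */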
#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
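
/*
 * Illustrative sketch: read and update a single register field without
 * disturbing the surrounding bits; MT_BBP_AGC and its GAIN field are
 * made-up names:
 *
 *	u8 gain = mt76_get_field(dev, MT_BBP_AGC, MT_BBP_AGC_GAIN);
 *	mt76_rmw_field(dev, MT_BBP_AGC, MT_BBP_AGC_GAIN, gain + 1);
 */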
#define mt76_hw(dev) (dev)->mphy.hw
static inline struct ieee80211_hw *
mt76_wcid_hw(struct mt76_dev *dev, u8 wcid)
{
	if (wcid < MT76_N_WCIDS &&
	    mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
		return dev->phy2->hw;

	return dev->phy.hw;
}
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
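
/*
 * Illustrative sketch: wait up to 100 ms for a busy bit to clear;
 * MT_MAC_STATUS is a made-up register name:
 *
 *	if (!mt76_poll_msec(dev, MT_MAC_STATUS, BIT(0), 0, 100))
 *		return -ETIMEDOUT;
 */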
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
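
/*
 * Example: dev->rev packs the chip ID in the upper half and the hardware
 * revision in the lower half, so rev == 0x76320044 yields
 * mt76_chip() == 0x7632 and mt76_rev() == 0x0044.
 */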
#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
{
	if (phy_ext && dev->phy2)
		return dev->phy2->hw;

	return dev->phy.hw;
}
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}
/* increment with wrap-around; size must be a power of two */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around; size must be a power of two */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}
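
/*
 * Illustrative sketch: advancing a descriptor ring index with a
 * power-of-two ring size:
 *
 *	q->head = mt76_incr(q->head, MT_TX_RING_SIZE);	// 255 wraps to 0
 */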
static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}
static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}
static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}
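
/*
 * Note: the hardware expects the 802.11 header to be padded to a 4-byte
 * boundary; skb_push() grows the buffer by two bytes, the header is moved
 * up, and the resulting two-byte gap between header and payload is zeroed.
 */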
static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_tasklet(unsigned long data);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);
void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
			 __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
			   __releases(&dev->status_list.lock);
int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
			  bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
int mt76_get_min_avg_rssi(struct mt76_dev *dev);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  u16 idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac_addr);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
			 int len);
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = dev->phy.hw;

	if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
		hw = dev->phy2->hw;

	info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;

	return hw;
}
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
			 int len);
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}
/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
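
/*
 * Note: passing a non-NULL actual_len selects the IN (command response)
 * endpoint, so the same helper serves both command submission and
 * response reads.
 */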
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
		   int data_len, int tail_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);