1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc.
4 * This file is written based on mt76/usb.c.
6 * Author: Felix Fietkau <nbd@nbd.name>
7 * Lorenzo Bianconi <lorenzo@kernel.org>
8 * Sean Wang <sean.wang@mediatek.com>
 */
11 #include <linux/iopoll.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/mmc/sdio_func.h>
15 #include <linux/sched.h>
16 #include <linux/kthread.h>
21 mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
23 struct mt76_queue *q = &dev->q_rx[qid];
25 spin_lock_init(&q->lock);
26 q->entry = devm_kcalloc(dev->dev,
27 MT_NUM_RX_ENTRIES, sizeof(*q->entry),
32 q->ndesc = MT_NUM_RX_ENTRIES;
33 q->head = q->tail = 0;
39 static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
43 q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
45 return ERR_PTR(-ENOMEM);
47 spin_lock_init(&q->lock);
48 q->entry = devm_kcalloc(dev->dev,
49 MT_NUM_TX_ENTRIES, sizeof(*q->entry),
52 return ERR_PTR(-ENOMEM);
54 q->ndesc = MT_NUM_TX_ENTRIES;
59 static int mt76s_alloc_tx(struct mt76_dev *dev)
64 for (i = 0; i <= MT_TXQ_PSD; i++) {
65 q = mt76s_alloc_tx_queue(dev);
73 q = mt76s_alloc_tx_queue(dev);
78 dev->q_mcu[MT_MCUQ_WM] = q;
83 int mt76s_alloc_queues(struct mt76_dev *dev)
87 err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN);
91 return mt76s_alloc_tx(dev);
93 EXPORT_SYMBOL_GPL(mt76s_alloc_queues);
95 static struct mt76_queue_entry *
96 mt76s_get_next_rx_entry(struct mt76_queue *q)
98 struct mt76_queue_entry *e = NULL;
100 spin_lock_bh(&q->lock);
102 e = &q->entry[q->tail];
103 q->tail = (q->tail + 1) % q->ndesc;
106 spin_unlock_bh(&q->lock);
112 mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
114 int qid = q - &dev->q_rx[MT_RXQ_MAIN];
118 struct mt76_queue_entry *e;
120 if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
123 e = mt76s_get_next_rx_entry(q);
127 dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
131 if (qid == MT_RXQ_MAIN)
132 mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
137 static void mt76s_net_worker(struct mt76_worker *w)
139 struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
141 struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
150 mt76_for_each_q_rx(dev, i)
151 nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);
155 } while (nframes > 0);
158 static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
160 struct mt76_queue_entry entry;
167 mcu = q == dev->q_mcu[MT_MCUQ_WM];
168 while (q->queued > 0) {
169 if (!q->entry[q->tail].done)
172 entry = q->entry[q->tail];
173 q->entry[q->tail].done = false;
176 dev_kfree_skb(entry.skb);
180 mt76_queue_tx_complete(dev, q, &entry);
185 wake_up(&dev->tx_wait);
188 mt76_txq_schedule(&dev->phy, q->qid);
193 static void mt76s_status_worker(struct mt76_worker *w)
195 struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
197 struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
201 nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
203 for (i = 0; i <= MT_TXQ_PSD; i++)
204 nframes += mt76s_process_tx_queue(dev,
207 if (dev->drv->tx_status_data &&
208 !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
209 queue_work(dev->wq, &dev->sdio.stat_work);
210 } while (nframes > 0);
213 static void mt76s_tx_status_data(struct work_struct *work)
215 struct mt76_sdio *sdio;
216 struct mt76_dev *dev;
220 sdio = container_of(work, struct mt76_sdio, stat_work);
221 dev = container_of(sdio, struct mt76_dev, sdio);
224 if (test_bit(MT76_REMOVED, &dev->phy.state))
227 if (!dev->drv->tx_status_data(dev, &update))
232 if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
233 queue_work(dev->wq, &sdio->stat_work);
235 clear_bit(MT76_READING_STATS, &dev->phy.state);
239 mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
240 struct sk_buff *skb, struct mt76_wcid *wcid,
241 struct ieee80211_sta *sta)
243 struct mt76_tx_info tx_info = {
246 int err, len = skb->len;
249 if (q->queued == q->ndesc)
252 skb->prev = skb->next = NULL;
253 err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
257 q->entry[q->head].skb = tx_info.skb;
258 q->entry[q->head].buf_sz = len;
259 q->head = (q->head + 1) % q->ndesc;
266 mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
267 struct sk_buff *skb, u32 tx_info)
269 int ret = -ENOSPC, len = skb->len, pad;
271 if (q->queued == q->ndesc)
274 pad = round_up(skb->len, 4) - skb->len;
275 ret = mt76_skb_adjust_pad(skb, pad);
279 spin_lock_bh(&q->lock);
281 q->entry[q->head].buf_sz = len;
282 q->entry[q->head].skb = skb;
283 q->head = (q->head + 1) % q->ndesc;
286 spin_unlock_bh(&q->lock);
296 static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
298 struct mt76_sdio *sdio = &dev->sdio;
300 mt76_worker_schedule(&sdio->txrx_worker);
303 static const struct mt76_queue_ops sdio_queue_ops = {
304 .tx_queue_skb = mt76s_tx_queue_skb,
305 .kick = mt76s_tx_kick,
306 .tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
309 void mt76s_deinit(struct mt76_dev *dev)
311 struct mt76_sdio *sdio = &dev->sdio;
314 mt76_worker_teardown(&sdio->txrx_worker);
315 mt76_worker_teardown(&sdio->status_worker);
316 mt76_worker_teardown(&sdio->net_worker);
318 cancel_work_sync(&sdio->stat_work);
319 clear_bit(MT76_READING_STATS, &dev->phy.state);
321 mt76_tx_status_check(dev, NULL, true);
323 sdio_claim_host(sdio->func);
324 sdio_release_irq(sdio->func);
325 sdio_release_host(sdio->func);
327 mt76_for_each_q_rx(dev, i) {
328 struct mt76_queue *q = &dev->q_rx[i];
331 for (j = 0; j < q->ndesc; j++) {
332 struct mt76_queue_entry *e = &q->entry[j];
337 dev_kfree_skb(e->skb);
342 EXPORT_SYMBOL_GPL(mt76s_deinit);
344 int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
345 const struct mt76_bus_ops *bus_ops)
347 struct mt76_sdio *sdio = &dev->sdio;
350 err = mt76_worker_setup(dev->hw, &sdio->status_worker,
351 mt76s_status_worker, "sdio-status");
355 err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
360 sched_set_fifo_low(sdio->status_worker.task);
361 sched_set_fifo_low(sdio->net_worker.task);
363 INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
365 dev->queue_ops = &sdio_queue_ops;
367 dev->sdio.func = func;
371 EXPORT_SYMBOL_GPL(mt76s_init);
373 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
374 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
375 MODULE_LICENSE("Dual BSD/GPL");