1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc.
4 * Author: Felix Fietkau <nbd@nbd.name>
5 * Lorenzo Bianconi <lorenzo@kernel.org>
6 * Sean Wang <sean.wang@mediatek.com>
 */
9 #include <linux/kernel.h>
10 #include <linux/iopoll.h>
11 #include <linux/module.h>
13 #include <linux/mmc/host.h>
14 #include <linux/mmc/sdio_ids.h>
15 #include <linux/mmc/sdio_func.h>
/* Refill the local SDIO TX scheduler quotas from the WTQCR counter words
 * the device reports in its interrupt status block.
 *
 * @dev:  mt76 device
 * @data: TX-queue count words (caller passes intr->tx.wtqcr); each u32
 *        packs two 16-bit counters, unpacked with TXQ_CNT_L / TXQ_CNT_H.
 *
 * Returns the total quota returned by the firmware (PSE data + PLE data
 * + PSE MCU), or 0 when nothing was released.
 *
 * NOTE(review): this excerpt is missing physical lines (opening brace,
 * the `i` declaration, closing braces and the early `return 0;`) —
 * restore them from the original source before compiling.
 */
22 static int mt7663s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
/* Per-access-class quota released from the PLE (payload) pool. */
24 u32 ple_ac_data_quota[] = {
25 FIELD_GET(TXQ_CNT_L, data[4]), /* VO */
26 FIELD_GET(TXQ_CNT_H, data[3]), /* VI */
27 FIELD_GET(TXQ_CNT_L, data[3]), /* BE */
28 FIELD_GET(TXQ_CNT_H, data[2]), /* BK */
/* Per-access-class quota released from the PSE (page) pool. */
30 u32 pse_ac_data_quota[] = {
31 FIELD_GET(TXQ_CNT_H, data[1]), /* VO */
32 FIELD_GET(TXQ_CNT_L, data[1]), /* VI */
33 FIELD_GET(TXQ_CNT_H, data[0]), /* BE */
34 FIELD_GET(TXQ_CNT_L, data[0]), /* BK */
/* MCU (firmware command) quota lives in the low half of data[2]. */
36 u32 pse_mcu_quota = FIELD_GET(TXQ_CNT_L, data[2]);
37 u32 pse_data_quota = 0, ple_data_quota = 0;
38 struct mt76_sdio *sdio = &dev->sdio;
/* Fold the per-AC counters into single data totals. */
41 for (i = 0; i < ARRAY_SIZE(pse_ac_data_quota); i++) {
42 pse_data_quota += pse_ac_data_quota[i];
43 ple_data_quota += ple_ac_data_quota[i];
/* Nothing released: report 0 so the caller does not count progress. */
46 if (!pse_data_quota && !ple_data_quota && !pse_mcu_quota)
/* Credit the released quota back to the local scheduler state. */
49 sdio->sched.pse_mcu_quota += pse_mcu_quota;
50 sdio->sched.pse_data_quota += pse_data_quota;
51 sdio->sched.ple_data_quota += ple_data_quota;
53 return pse_data_quota + ple_data_quota + pse_mcu_quota;
/* Build an rx sk_buff for one received frame sitting in the bulk rx
 * buffer: the first MT_SKB_HEAD_LEN bytes are copied into the skb
 * linear area, the remainder is attached zero-copy as a page fragment
 * pointing into the backing page of @data.
 *
 * NOTE(review): excerpt is truncated — the final parameter of the
 * signature (presumably `int buf_len`, used below), the alloc_skb()
 * failure check, braces and the `return skb;` are missing here; confirm
 * against the original file.
 */
56 static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len,
/* Linear part is capped at MT_SKB_HEAD_LEN; the rest rides in a frag. */
59 int len = min_t(int, data_len, MT_SKB_HEAD_LEN);
62 skb = alloc_skb(len, GFP_KERNEL);
66 skb_put_data(skb, data, len);
/* Resolve the head page so the frag holds a reference that keeps the
 * bulk buffer alive while the skb is in flight.
 */
71 page = virt_to_head_page(data);
72 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
73 page, data - page_address(page),
74 data_len - len, buf_len);
/* Drain every frame currently pending on rx queue @qid with one bulk
 * SDIO read, then carve the buffer up into one skb per frame and post
 * them on the mt76 rx queue.
 *
 * @dev:  mt76 device
 * @qid:  rx queue index (0 = data, 1 = mcu event, per the caller)
 * @intr: interrupt status block with per-queue frame counts/lengths
 *
 * NOTE(review): excerpt is missing lines (alloc-failure checks, error
 * returns, queue-full break, `q->queued` update, final return) —
 * restore from the original before compiling.
 */
81 static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
82 struct mt76s_intr *intr)
84 struct mt76_queue *q = &dev->q_rx[qid];
85 struct mt76_sdio *sdio = &dev->sdio;
86 int len = 0, err, i, order;
/* Total transfer size: each frame is padded to a 4-byte boundary plus a
 * 4-byte trailer (presumably hardware-added — confirm against HW docs).
 */
90 for (i = 0; i < intr->rx.num[qid]; i++)
91 len += round_up(intr->rx.len[qid][i] + 4, 4);
/* Multi-block transfers must be a whole number of SDIO blocks. */
96 if (len > sdio->func->cur_blksize)
97 len = roundup(len, sdio->func->cur_blksize);
/* One contiguous page allocation backs all frames of this batch. */
99 order = get_order(len);
100 page = __dev_alloc_pages(GFP_KERNEL, order);
104 buf = page_address(page);
/* Single bulk read of the whole batch from the rx data register. */
106 err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
108 dev_err(dev->dev, "sdio read data failed:%d\n", err);
109 __free_pages(page, order);
/* Split the bulk buffer into per-frame skbs; frags keep page refs. */
113 for (i = 0; i < intr->rx.num[qid]; i++) {
114 int index = (q->head + i) % q->ndesc;
115 struct mt76_queue_entry *e = &q->entry[index];
117 len = intr->rx.len[qid][i];
118 e->skb = mt7663s_build_rx_skb(buf, len, round_up(len + 4, 4));
/* Advance past this frame's padded slot in the bulk buffer. */
122 buf += round_up(len + 4, 4);
/* Ring would overflow past this entry — stop consuming frames. */
123 if (q->queued + i + 1 == q->ndesc)
/* Drop our own reference; the skb frags still pin the page. */
126 __free_pages(page, order);
/* Publish the new ring head under the queue lock. */
128 spin_lock_bh(&q->lock);
129 q->head = (q->head + i) % q->ndesc;
131 spin_unlock_bh(&q->lock);
/* Top-level rx service routine: read the WHISR interrupt status block,
 * drain any rx queues flagged as done, and refill the TX scheduler
 * quota from the returned counters.
 *
 * Returns the number of "events" processed (rx frames plus a quota
 * refill), per the visible `nframes` accumulator.
 *
 * NOTE(review): excerpt is missing lines (error handling after the
 * reads, `nframes += ret;` accumulation, final return) — restore from
 * the original before compiling.
 */
136 static int mt7663s_rx_handler(struct mt76_dev *dev)
138 struct mt76_sdio *sdio = &dev->sdio;
139 struct mt76s_intr *intr = sdio->intr_data;
140 int nframes = 0, ret;
/* Fetch the whole interrupt status block in one bulk read. */
142 ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr));
146 trace_dev_irq(dev, intr->isr, 0);
/* Rx queue 0 (data path) has completed frames pending. */
148 if (intr->isr & WHIER_RX0_DONE_INT_EN) {
149 ret = mt7663s_rx_run_queue(dev, 0, intr);
151 mt76_worker_schedule(&sdio->net_worker);
/* Rx queue 1 (mcu/event path) has completed frames pending. */
156 if (intr->isr & WHIER_RX1_DONE_INT_EN) {
157 ret = mt7663s_rx_run_queue(dev, 1, intr);
159 mt76_worker_schedule(&sdio->net_worker);
/* A quota refill also counts as forward progress for the caller. */
164 nframes += !!mt7663s_refill_sched_quota(dev, intr->tx.wtqcr);
/* Check whether enough scheduler quota remains to transmit one frame of
 * @buf_sz bytes, and if so account it into the running batch totals.
 *
 * @sdio:     SDIO bus state holding the quota counters
 * @mcu:      true for the MCU (firmware command) queue, false for data
 * @buf_sz:   frame buffer size in bytes
 * @pse_size: in/out — PSE pages consumed by the batch so far
 * @ple_size: in/out — PLE entries consumed by the batch so far
 *
 * NOTE(review): excerpt is missing the `if (mcu) ... else ...` scaffold
 * and the -EBUSY/0 returns implied by the visible comparisons — confirm
 * the branch structure against the original file.
 */
169 static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, bool mcu, int buf_sz,
170 int *pse_size, int *ple_size)
/* PSE pages needed; sched.deficit is per-frame fixed overhead. */
174 pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ);
/* MCU frames draw from the dedicated MCU page quota only. */
177 if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz)
/* Data frames need both a PSE page budget and one PLE entry. */
180 if (sdio->sched.pse_data_quota < *pse_size + pse_sz ||
181 sdio->sched.ple_data_quota < *ple_size + 1)
184 *ple_size = *ple_size + 1;
/* Commit this frame's page count to the batch total. */
186 *pse_size = *pse_size + pse_sz;
/* Deduct the quota consumed by a transmitted batch from the local
 * scheduler counters (the device returns it later via WTQCR, see
 * mt7663s_refill_sched_quota()).
 *
 * NOTE(review): the visible lines subtract from all three counters
 * unconditionally, but @mcu is otherwise unused — an `if (mcu)`/`else`
 * split selecting MCU vs. data counters appears to have been lost in
 * this excerpt; confirm against the original file.
 */
191 static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, bool mcu,
192 int pse_size, int ple_size)
195 sdio->sched.pse_mcu_quota -= pse_size;
197 sdio->sched.pse_data_quota -= pse_size;
198 sdio->sched.ple_data_quota -= ple_size;
/* Push @len bytes at @data to the device's TX data register in a single
 * bulk SDIO write, padding the transfer up to a whole number of SDIO
 * blocks when it exceeds one block.
 *
 * NOTE(review): excerpt is missing braces and the `return err;` —
 * restore from the original before compiling.
 */
202 static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
204 struct mt76_sdio *sdio = &dev->sdio;
/* Multi-block transfers must be block-size aligned. */
207 if (len > sdio->func->cur_blksize)
208 len = roundup(len, sdio->func->cur_blksize);
210 err = sdio_writesb(sdio->func, MCR_WTDR1, data, len);
212 dev_err(dev->dev, "sdio write failed: %d\n", err);
/* Drain pending entries of TX queue @q: aggregate as many frames as the
 * scheduler quota and the xmit buffer allow into sdio->xmit_buf[qid],
 * then send the whole batch with one bulk SDIO write.
 *
 * Returns the number of frames transmitted (per the visible `nframes`),
 * or a negative error.
 *
 * NOTE(review): excerpt is heavily truncated — error paths, the frag
 * copy length, `nframes`/`len` bookkeeping and several braces are
 * missing; restore from the original before compiling.
 */
217 static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
219 int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
/* The WM MCU queue is special-cased throughout this function. */
220 bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
221 struct mt76_sdio *sdio = &dev->sdio;
/* MCU traffic uses the last xmit buffer; data queues use their qid. */
223 qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
/* Consume entries between first and head (the unsent window). */
224 while (q->first != q->head) {
225 struct mt76_queue_entry *e = &q->entry[q->first];
226 struct sk_buff *iter;
/* Before the MCU is up, bypass aggregation and quota accounting:
 * pad the frame with a 4-byte zero trailer and send it directly.
 */
228 if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
229 __skb_put_zero(e->skb, 4);
230 err = __mt7663s_xmit_queue(dev, e->skb->data,
/* Stop if this frame (+4-byte trailer) would overflow the batch. */
238 if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ)
/* Stop if the scheduler quota cannot cover this frame. */
241 if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
/* Copy the linear part, then each fragment, into the batch buffer. */
245 memcpy(sdio->xmit_buf[qid] + len, e->skb->data,
246 skb_headlen(e->skb));
247 len += skb_headlen(e->skb);
250 skb_walk_frags(e->skb, iter) {
251 memcpy(sdio->xmit_buf[qid] + len, iter->data,
/* Frame fully staged — advance the consumer index. */
257 q->first = (q->first + 1) % q->ndesc;
/* Terminate the batch with a 4-byte zero trailer and send it. */
262 memset(sdio->xmit_buf[qid] + len, 0, 4);
263 err = __mt7663s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4);
/* Charge the consumed quota and kick tx-status processing. */
267 mt7663s_tx_update_quota(sdio, mcu, pse_sz, ple_sz);
269 mt76_worker_schedule(&sdio->status_worker);
/* Worker servicing both directions of the SDIO bus: with the device
 * interrupt masked, repeatedly run all TX queues and the RX handler
 * until a full pass makes no progress, then unmask the interrupt.
 *
 * NOTE(review): excerpt is missing lines (the container_of field name,
 * `int i, nframes, ret;`, the do { ... } opening, `nframes = 0;`,
 * error checks and `nframes += ret;` accumulation) — restore from the
 * original before compiling.
 */
274 void mt7663s_txrx_worker(struct mt76_worker *w)
276 struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
278 struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
281 /* disable interrupt */
282 sdio_claim_host(sdio->func);
283 sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
/* One pass: every AC data queue up to PSD, then the MCU queue. */
289 for (i = 0; i <= MT_TXQ_PSD; i++) {
290 ret = mt7663s_tx_run_queue(dev, dev->phy.q_tx[i]);
294 ret = mt7663s_tx_run_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
/* Service rx after tx; loop again while any pass made progress. */
299 ret = mt7663s_rx_handler(dev);
302 } while (nframes > 0);
304 /* enable interrupt */
305 sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
306 sdio_release_host(sdio->func);
/* SDIO interrupt handler: all real work is deferred to the txrx worker
 * (which re-reads WHISR), so just schedule it — unless the device has
 * not finished initialization yet.
 *
 * NOTE(review): excerpt is missing braces and the early `return;` after
 * the state check — restore from the original before compiling.
 */
309 void mt7663s_sdio_irq(struct sdio_func *func)
311 struct mt7615_dev *dev = sdio_get_drvdata(func);
312 struct mt76_sdio *sdio = &dev->mt76.sdio;
/* Ignore spurious interrupts raised before the device is ready. */
314 if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
317 mt76_worker_schedule(&sdio->txrx_worker);