2 * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
/* Sequence numbers live in a 12-bit circular space (IEEE 802.11 SN) */
#define SEQ_MODULO 0x1000
#define SEQ_MASK 0xfff

/* true if @sq1 precedes @sq2 in the 12-bit circular sequence space */
static inline int seq_less(u16 sq1, u16 sq2)
{
	u16 delta = (sq1 - sq2) & SEQ_MASK;

	return delta > (SEQ_MODULO >> 1);
}

/* next sequence number, wrapping modulo SEQ_MODULO */
static inline u16 seq_inc(u16 sq)
{
	u16 next = sq + 1;

	return next & SEQ_MASK;
}

/* circular distance from @sq2 forward to @sq1 */
static inline u16 seq_sub(u16 sq1, u16 sq2)
{
	u16 delta = sq1 - sq2;

	return delta & SEQ_MASK;
}
38 static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
40 return seq_sub(seq, r->ssn) % r->buf_size;
/* Pass the frame stored at @index up the network stack and advance
 * the reorder window head by one.
 * NOTE(review): this excerpt looks truncated — the trailing parameter
 * (presumably "int index") and a NULL check on skb are not visible;
 * verify against the full source.
 */
static void wil_release_reorder_frame(struct wil6210_priv *wil,
				      struct wil_tid_ampdu_rx *r,
	struct net_device *ndev = wil_to_ndev(wil);
	struct sk_buff *skb = r->reorder_buf[index];

	/* release the frame from the reorder ring buffer */
	r->reorder_buf[index] = NULL;
	wil_netif_rx_any(skb, ndev);

	/* window head moves past this slot */
	r->head_seq_num = seq_inc(r->head_seq_num);
/* Release every stored frame from the current window head up to
 * (not including) @hseq, then set the head to @hseq.
 * NOTE(review): the trailing parameter line (presumably "u16 hseq")
 * is not visible in this excerpt — verify against the full source.
 */
static void wil_release_reorder_frames(struct wil6210_priv *wil,
				       struct wil_tid_ampdu_rx *r,
	/* note: this function is never called with
	 * hseq preceding r->head_seq_num, i.e it is always true
	 * !seq_less(hseq, r->head_seq_num)
	 * and thus on loop exit it should be
	 * r->head_seq_num == hseq
	 */
	while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
		index = reorder_index(r, r->head_seq_num);
		wil_release_reorder_frame(wil, r, index);
	/* force exact head even if the loop stopped early
	 * (no stored MPDUs left)
	 */
	r->head_seq_num = hseq;
/* Starting at the window head, pass up every consecutively stored
 * frame until the first gap (empty slot) is reached.
 */
static void wil_reorder_release(struct wil6210_priv *wil,
				struct wil_tid_ampdu_rx *r)
	int index = reorder_index(r, r->head_seq_num);

	while (r->reorder_buf[index]) {
		wil_release_reorder_frame(wil, r, index);
		/* head was advanced by the release above; recompute slot */
		index = reorder_index(r, r->head_seq_num);
/* Main Rx reorder entry point — called in NAPI context.
 * Either passes @skb up the stack immediately, drops it (old/duplicate
 * sequence), or parks it in the per-TID reorder buffer.
 * NOTE(review): several control-flow lines (braces, goto/out labels,
 * some wil_err_ratelimited call sites) appear truncated in this
 * excerpt — verify against the full source.
 */
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	/* all addressing info comes from the hardware Rx descriptor */
	int tid = wil_rxdesc_tid(d);
	int cid = wil_rxdesc_cid(d);
	int mid = wil_rxdesc_mid(d);
	u16 seq = wil_rxdesc_seq(d);
	int mcast = wil_rxdesc_mcast(d);
	struct wil_sta_info *sta = &wil->sta[cid];
	struct wil_tid_ampdu_rx *r;

	wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
		     mid, cid, tid, seq, mcast);

	/* multicast frames are not subject to BACK reordering */
	if (unlikely(mcast)) {
		wil_netif_rx_any(skb, ndev);

	spin_lock(&sta->tid_rx_lock);

	r = sta->tid_rx[tid];
	/* no reorder context for this TID: pass straight up */
		wil_netif_rx_any(skb, ndev);

	hseq = r->head_seq_num;

	/* Due to the race between WMI events, where BACK establishment
	 * reported, and data Rx, few packets may be pass up before reorder
	 * buffer get allocated. Catch up by pretending SSN is what we
	 * see in the 1-st Rx packet
	 *
	 * Another scenario, Rx get delayed and we got packet from before
	 * BACK. Pass it to the stack and wait.
	 */
	r->first_time = false;
	if (seq != r->head_seq_num) {
		if (seq_less(seq, r->head_seq_num)) {
			"Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
			seq, r->head_seq_num);
			/* keep first_time so the next frame can resync */
			r->first_time = true;
			wil_netif_rx_any(skb, ndev);
			"Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
			seq, r->head_seq_num);
			/* resync window to the first observed sequence */
			r->head_seq_num = seq;

	/* frame with out of date sequence number */
	if (seq_less(seq, r->head_seq_num)) {
		r->ssn_last_drop = seq;
		wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
			     seq, r->head_seq_num);

	/* If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
		hseq = seq_inc(seq_sub(seq, r->buf_size));
		/* release stored frames up to new head to stack */
		wil_release_reorder_frames(wil, r, hseq);

	/* Now the new frame is always in the range of the reordering buffer */

	index = reorder_index(r, seq);

	/* check if we already stored this frame */
	if (r->reorder_buf[index]) {
		wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);

	/* If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
		r->head_seq_num = seq_inc(r->head_seq_num);
		wil_netif_rx_any(skb, ndev);

	/* put the frame in the reordering buffer */
	r->reorder_buf[index] = skb;
	r->reorder_time[index] = jiffies;
	r->stored_mpdu_num++;
	/* flush any now-contiguous run starting at the head */
	wil_reorder_release(wil, r);

	spin_unlock(&sta->tid_rx_lock);
/* Allocate and initialize an Rx BACK reorder context.
 * NOTE(review): this excerpt looks truncated — the remaining parameter
 * line(s) (presumably "int size, u16 ssn"), the NULL check on the
 * kzalloc result, the buf_size assignment, and the return statements
 * are not visible; verify against the full source.
 */
struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
	struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);

	/* ring of parked skbs — one slot per sequence number in window */
		kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
	/* per-slot arrival timestamps (jiffies) */
		kcalloc(size, sizeof(unsigned long), GFP_KERNEL);
	if (!r->reorder_buf || !r->reorder_time) {
		/* kfree(NULL) is a no-op — safe when only one alloc failed */
		kfree(r->reorder_buf);
		kfree(r->reorder_time);

	/* window starts at the negotiated starting sequence number */
	r->head_seq_num = ssn;
	r->stored_mpdu_num = 0;
	/* allow wil_rx_reorder() to resync on the first frame */
	r->first_time = true;
/* Flush all frames still held in the reorder buffer up the stack,
 * then free the context's storage. Caller holds tid_rx_lock.
 * NOTE(review): a NULL check on @r and the final kfree(r) are not
 * visible in this excerpt — verify against the full source.
 */
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
			   struct wil_tid_ampdu_rx *r)
	/* releasing up to head + buf_size empties the whole window */
	wil_release_reorder_frames(wil, r, r->head_seq_num + r->buf_size);
	kfree(r->reorder_buf);
	kfree(r->reorder_time);
/* ADDBA processing */
/* Clamp the peer-requested aggregation window size to what the
 * device and current MTU allow.
 */
static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
	/* largest usable window: bounded by the HW limit and by how many
	 * maximal MPDUs fit into one A-MPDU at the current mtu_max
	 */
	u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE /
				 (mtu_max + WIL_MAX_MPDU_OVERHEAD));

	return min(max_agg_size, req_agg_wsize);
/* Block Ack - Rx side (recipient) */
/* Queue an incoming ADDBA request for deferred processing by
 * wil_back_rx_worker(); called from atomic/event context, the actual
 * WMI exchange happens in the worker.
 * NOTE(review): a NULL check on the kzalloc result and the return
 * statement are not visible in this excerpt — verify against the
 * full source.
 */
int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
			 u8 dialog_token, __le16 ba_param_set,
			 __le16 ba_timeout, __le16 ba_seq_ctrl)
	struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);

	/* snapshot the request, converting LE wire fields to CPU order */
	req->cidxtid = cidxtid;
	req->dialog_token = dialog_token;
	req->ba_param_set = le16_to_cpu(ba_param_set);
	req->ba_timeout = le16_to_cpu(ba_timeout);
	req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);

	mutex_lock(&wil->back_rx_mutex);
	list_add_tail(&req->list, &wil->back_rx_pending);
	mutex_unlock(&wil->back_rx_mutex);

	queue_work(wil->wq_service, &wil->back_rx_worker);
/* Process one queued ADDBA request: validate it, send the WMI
 * response, and (re)install the Rx reorder context for the CID/TID.
 * NOTE(review): several lines (declarations of cid/tid/rc/agg_wsize,
 * goto/out labels, some log call sites) appear truncated in this
 * excerpt — verify against the full source.
 */
static void wil_back_rx_handle(struct wil6210_priv *wil,
			       struct wil_back_rx *req)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
	struct wil_sta_info *sta;

	/* ba_param_set layout (802.11 Block Ack Parameter Set):
	 * bit 0: A-MSDU supported
	 * bit 1: policy (should be 0 for us)
	 * bits 6..15: buffer size
	 */
	u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
	bool agg_amsdu = !!(req->ba_param_set & BIT(0));
	int ba_policy = req->ba_param_set & BIT(1);
	u16 agg_timeout = req->ba_timeout;
	u16 status = WLAN_STATUS_SUCCESS;
	/* starting sequence number: upper 12 bits of the BA seq control */
	u16 ssn = req->ba_seq_ctrl >> 4;
	struct wil_tid_ampdu_rx *r;

	parse_cidxtid(req->cidxtid, &cid, &tid);

	/* sanity checks */
	if (cid >= WIL6210_MAX_CID) {
		wil_err(wil, "BACK: invalid CID %d\n", cid);

	sta = &wil->sta[cid];
	if (sta->status != wil_sta_connected) {
		wil_err(wil, "BACK: CID %d not connected\n", cid);

		"ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
		cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
		agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);

	/* only immediate BACK (policy == 0) is supported */
		wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
		status = WLAN_STATUS_INVALID_QOS_PARAM;
	if (status == WLAN_STATUS_SUCCESS)
		agg_wsize = wil_agg_size(wil, req_agg_wsize);

	rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
			       agg_amsdu, agg_wsize, agg_timeout);
	if (rc || (status != WLAN_STATUS_SUCCESS))

	/* apply: free any previous context before installing new one */
	r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
	spin_lock_bh(&sta->tid_rx_lock);
	wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
	sta->tid_rx[tid] = r;
	spin_unlock_bh(&sta->tid_rx_lock);
/* Discard all pending (unprocessed) ADDBA Rx requests.
 * NOTE(review): the kfree of each removed entry is not visible in this
 * excerpt — confirm entries are freed inside the loop.
 */
void wil_back_rx_flush(struct wil6210_priv *wil)
	struct wil_back_rx *evt, *t;

	wil_dbg_misc(wil, "%s()\n", __func__);

	mutex_lock(&wil->back_rx_mutex);

	list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
		list_del(&evt->list);

	mutex_unlock(&wil->back_rx_mutex);
/* Retrieve next ADDBA request from the pending list */
/* Returns the list node of the next pending request, or NULL if the
 * queue is empty. Caller owns the returned entry.
 * NOTE(review): the list_del of the returned node and the return
 * statement are not visible in this excerpt — verify against the
 * full source.
 */
static struct list_head *next_back_rx(struct wil6210_priv *wil)
	struct list_head *ret = NULL;

	mutex_lock(&wil->back_rx_mutex);

	if (!list_empty(&wil->back_rx_pending)) {
		ret = wil->back_rx_pending.next;

	mutex_unlock(&wil->back_rx_mutex);
/* Work handler: drain the pending ADDBA (Rx) queue, handling one
 * request at a time via wil_back_rx_handle().
 * NOTE(review): the container_of member argument and the per-entry
 * kfree are not visible in this excerpt — verify against the full
 * source.
 */
void wil_back_rx_worker(struct work_struct *work)
	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
	struct wil_back_rx *evt;
	struct list_head *lh;

	while ((lh = next_back_rx(wil)) != NULL) {
		evt = list_entry(lh, struct wil_back_rx, list);

		wil_back_rx_handle(wil, evt);
/* BACK - Tx (originator) side */
/* Issue an ADDBA request for a Tx vring unless one is already in
 * progress or an aggregation session is already established.
 * NOTE(review): the declaration of rc, some log call sites, and the
 * "if (rc)" guard before clearing addba_in_progress are not visible
 * in this excerpt — verify against the full source.
 */
static void wil_back_tx_handle(struct wil6210_priv *wil,
			       struct wil_back_tx *req)
	struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];

	if (txdata->addba_in_progress) {
		wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",

	if (txdata->agg_wsize) {
		"ADDBA for vring[%d] already established wsize %d\n",
		req->ringid, txdata->agg_wsize);

	/* mark in-progress before issuing the WMI command */
	txdata->addba_in_progress = true;
	rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
	/* presumably cleared only on failure so a retry is possible */
		txdata->addba_in_progress = false;
/* Returns the list node of the next pending Tx ADDBA request, or NULL
 * if the queue is empty. Caller owns the returned entry.
 * NOTE(review): the list_del of the returned node and the return
 * statement are not visible in this excerpt — verify against the
 * full source.
 */
static struct list_head *next_back_tx(struct wil6210_priv *wil)
	struct list_head *ret = NULL;

	mutex_lock(&wil->back_tx_mutex);

	if (!list_empty(&wil->back_tx_pending)) {
		ret = wil->back_tx_pending.next;

	mutex_unlock(&wil->back_tx_mutex);
/* Work handler: drain the pending ADDBA (Tx) queue, handling one
 * request at a time via wil_back_tx_handle().
 * NOTE(review): the container_of member argument and the per-entry
 * kfree are not visible in this excerpt — verify against the full
 * source.
 */
void wil_back_tx_worker(struct work_struct *work)
	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
	struct wil_back_tx *evt;
	struct list_head *lh;

	while ((lh = next_back_tx(wil)) != NULL) {
		evt = list_entry(lh, struct wil_back_tx, list);

		wil_back_tx_handle(wil, evt);
/* Discard all pending (unprocessed) ADDBA Tx requests.
 * NOTE(review): the kfree of each removed entry is not visible in this
 * excerpt — confirm entries are freed inside the loop.
 */
void wil_back_tx_flush(struct wil6210_priv *wil)
	struct wil_back_tx *evt, *t;

	wil_dbg_misc(wil, "%s()\n", __func__);

	mutex_lock(&wil->back_tx_mutex);

	list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
		list_del(&evt->list);

	mutex_unlock(&wil->back_tx_mutex);
/* Queue a Tx ADDBA request for deferred processing by
 * wil_back_tx_worker().
 * NOTE(review): a NULL check on the kzalloc result and the return
 * statement are not visible in this excerpt — verify against the
 * full source.
 */
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
	struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);

	req->ringid = ringid;
	/* clamp requested window to device/MTU capabilities */
	req->agg_wsize = wil_agg_size(wil, wsize);
	req->agg_timeout = 0;

	mutex_lock(&wil->back_tx_mutex);
	list_add_tail(&req->list, &wil->back_tx_pending);
	mutex_unlock(&wil->back_tx_mutex);

	queue_work(wil->wq_service, &wil->back_tx_worker);