/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */

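/*
 * Illustrative sketch only (not part of the driver): how an upper layer
 * might use the per-transfer context described above. The names rx_pipe,
 * skb, paddr and handle_rx() are hypothetical placeholders; the CE calls
 * are the ones defined later in this file.
 *
 *	// Enqueue an anonymous receive buffer, remembering the skb as the
 *	// per-transfer context.
 *	ath10k_ce_rx_post_buf(rx_pipe, skb, paddr);
 *
 *	// Later, typically from the recv callback, the same context pointer
 *	// is echoed back together with the number of bytes received.
 *	while (ath10k_ce_completed_recv_next(rx_pipe, &ctx, &nbytes) == 0)
 *		handle_rx((struct sk_buff *)ctx, nbytes);
 */
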
static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
		     struct ath10k_hw_ce_regs_addr_map *addr_map)
{
	return ((offset << addr_map->lsb) & addr_map->mask);
}

static inline unsigned int
ath10k_get_ring_byte(unsigned int offset,
		     struct ath10k_hw_ce_regs_addr_map *addr_map)
{
	return ((offset & addr_map->mask) >> (addr_map->lsb));
}

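/*
 * Worked example (the lsb/mask values here are hypothetical, not taken from
 * any real register map): with addr_map->lsb = 16 and
 * addr_map->mask = 0xffff0000, ath10k_set_ring_byte(5, addr_map) packs 5
 * into bits 31:16 and returns 0x00050000, while
 * ath10k_get_ring_byte(0x00050000, addr_map) extracts the field and
 * returns 5.
 */
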
static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->read32(ar, offset);
}

static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->bus_ops->write32(ar, offset, value);
}

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
							u32 ce_ctrl_addr,
							unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dst_wr_index_addr, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->dst_wr_index_addr);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_wr_index_addr, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->sr_wr_index_addr);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->current_srri_addr);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_base_addr, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_size_addr, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->dmax));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->src_ring));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->current_drri_addr);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_base_addr, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_size_addr, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
			  (addr & ~(srcr_wm->wm_high->mask)) |
			  (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
			  (addr & ~(srcr_wm->wm_low->mask)) |
			  (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
			  (addr & ~(dstr_wm->wm_high->mask)) |
			  (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
			  (addr & ~(dstr_wm->wm_low->mask)) |
			  (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
}

static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr | host_ie->copy_complete->mask);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(host_ie->copy_complete->mask));
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(wm_regs->wm_mask));
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
	u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->misc_ie_addr);

	ath10k_ce_write32(ar,
			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
			  misc_ie_addr | misc_regs->err_mask);
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
	u32 misc_ie_addr = ath10k_ce_read32(ar,
			ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);

	ath10k_ce_write32(ar,
			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
			  misc_ie_addr & ~(misc_regs->err_mask));
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int mask)
{
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}

/*
 * Guts of ath10k_ce_send.
 * The caller takes responsibility for any needed locking.
 */
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc.addr = __cpu_to_le32(buffer);
	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* The index register is only updated for non-gather entries; a
	 * gather sequence is kicked off once its final (non-gather)
	 * descriptor has been queued.
	 */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	return ret;
}

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	/*
	 * This function must be called only if there is an incomplete
	 * scatter-gather transfer (before the index register is updated)
	 * that needs to be cleaned up.
	 */
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int delta;

	spin_lock_bh(&ce->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ce->ce_lock);

	return delta;
}

int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;

	lockdep_assert_held(&ce->ce_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

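/*
 * Worked example of the ring arithmetic above, assuming CE_RING_DELTA()
 * (ce.h) computes (toidx - fromidx) masked to the ring size; the numbers
 * are illustrative only. With 512 entries (nentries_mask = 511),
 * write_index = 10 and sw_index = 5, the call evaluates to
 * (4 - 10) & 511 = 506: the 512 entries minus the 5 still in flight minus
 * the one slot that is always kept empty so a full ring is never mistaken
 * for an empty one.
 */
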
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	if ((pipe->id != 5) &&
	    CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le32(paddr);
	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}

void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	u32 ctrl_addr = pipe->ctrl_addr;
	u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);

	/* Prevent a CE ring stuck issue that can occur when the ring is full:
	 * make sure the write index always stays at least 1 behind the read
	 * index.
	 */
	if ((cur_write_idx + nentries) == dest_ring->sw_index)
		nentries -= 1;

	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}

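/*
 * Illustration of the guard above (hypothetical numbers): with
 * nentries_mask = 511, sw_index = 100, the cached and hardware write
 * indices both at 90 and nentries = 10, advancing by the full 10 would
 * land the write index exactly on the read index, so one entry is held
 * back and the write index is advanced to 99 instead, one slot behind
 * sw_index.
 */
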
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Copy engine 5 (HTT Rx) will reuse the same transfer context,
	 * so update the transfer context for all CEs except CE5.
	 */
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  unsigned int *nbytesp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   nbytesp);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity: drop the stale per-transfer context */
		dest_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct ce_desc *desc;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity: drop the stale per-transfer context */
	src_ring->per_transfer_context[sw_index] = NULL;
	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   sw_index);
	desc->nbytes = 0;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity: drop the stale per-transfer context */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

/*
 * Guts of the interrupt handler for per-engine interrupts on a
 * particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
	u32 ctrl_addr = ce_state->ctrl_addr;

	spin_lock_bh(&ce->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  wm_regs->cc_mask);

	spin_unlock_bh(&ce->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ce->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but they still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);

	spin_unlock_bh(&ce->ce_lock);
}

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id;
	u32 intr_summary;

	intr_summary = ath10k_ce_interrupt_summary(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}

int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	return 0;
}

void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ce_id;
	struct ath10k_ce_pipe *ce_state;

	/* Skip the last copy engine, CE7, the diagnostic window, as it
	 * uses polling and isn't initialized for interrupts.
	 */
	for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) {
		ce_state = &ce->ce_states[ce_id];
		ath10k_ce_per_engine_handler_adjust(ce_state);
	}
}

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	memset(src_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->dest_nentries);

	memset(dest_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(sizeof(*src_ring) +
			   (nentries *
			    sizeof(*src_ring->per_transfer_context)),
			   GFP_KERNEL);
	if (src_ring == NULL)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return src_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
			  const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(sizeof(*dest_ring) +
			    (nentries *
			     sizeof(*dest_ring->per_transfer_context)),
			    GFP_KERNEL);
	if (dest_ring == NULL)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_zalloc_coherent(ar->dev,
				    (nentries * sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return dest_ring;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr)
{
	int ret;

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	return 0;
}

static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}

static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}

void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);
}

int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	int ret;

	/*
	 * Make sure there are enough CE ringbuffer entries for HTT TX to
	 * avoid additional TX locking checks.
	 *
	 * For lack of a better place, do the check here.
	 */
	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
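	/*
	 * Hypothetical illustration of the compile-time check above (the
	 * real constants live in hw.h and ce.h): if
	 * CE_HTT_H2T_MSG_SRC_NENTRIES were 4096 and TARGET_NUM_MSDU_DESC
	 * were 1424, then 2 * 1424 = 2848 <= 4095 and the build succeeds;
	 * a descriptor count larger than half the ring (less one) would
	 * trip the BUILD_BUG_ON at compile time.
	 */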
	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;

	if (attr->src_nentries)
		ce_state->send_cb = attr->send_cb;

	if (attr->dest_nentries)
		ce_state->recv_cb = attr->recv_cb;

	if (attr->src_nentries) {
		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
		if (IS_ERR(ce_state->src_ring)) {
			ret = PTR_ERR(ce_state->src_ring);
			ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
				   ce_id, ret);
			ce_state->src_ring = NULL;
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
								attr);
		if (IS_ERR(ce_state->dest_ring)) {
			ret = PTR_ERR(ce_state->dest_ring);
			ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
				   ce_id, ret);
			ce_state->dest_ring = NULL;
			return ret;
		}
	}

	return 0;
}

void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

	if (ce_state->src_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}

void ath10k_ce_dump_registers(struct ath10k *ar,
			      struct ath10k_fw_crash_data *crash_data)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_crash_data ce_data;
	u32 addr, id;

	lockdep_assert_held(&ar->data_lock);

	ath10k_err(ar, "Copy Engine register dump:\n");

	spin_lock_bh(&ce->ce_lock);
	for (id = 0; id < CE_COUNT; id++) {
		addr = ath10k_ce_base_address(ar, id);
		ce_data.base_addr = cpu_to_le32(addr);

		ce_data.src_wr_idx =
			cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
		ce_data.src_r_idx =
			cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
		ce_data.dst_wr_idx =
			cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
		ce_data.dst_r_idx =
			cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));

		if (crash_data)
			crash_data->ce_crash_data[id] = ce_data;

		ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
			   le32_to_cpu(ce_data.base_addr),
			   le32_to_cpu(ce_data.src_wr_idx),
			   le32_to_cpu(ce_data.src_r_idx),
			   le32_to_cpu(ce_data.dst_wr_idx),
			   le32_to_cpu(ce_data.dst_r_idx));
	}

	spin_unlock_bh(&ce->ce_lock);
}