/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

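/* Per-device table of "free" callbacks, indexed by request type.  A callback
 * is registered with octeon_register_reqtype_free_fn() and is invoked from
 * lio_process_iq_request_list() to release a buffer once Octeon has fetched
 * the corresponding instruction.
 */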
static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return the request status compatible to old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
	else if (OCTEON_CN23XX_PF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));
	else if (OCTEON_CN23XX_VF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_vf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have yet to be fetched by octeon
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list = vmalloc(array_size(num_descs,
						      sizeof(*iq->request_list)));
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
		iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);

	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);
	iq->pkts_processed = 0;

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);

	if (iq_no == 0) {
		iq->allow_soft_cmds = true;
		spin_lock_init(&iq->post_lock);
	} else {
		iq->allow_soft_cmds = false;
	}

	spin_lock_init(&iq->iq_flush_running_lock);

	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM,
						     0);
	if (!oct->check_db_wq[iq_no].wq) {
		vfree(iq->request_list);
		iq->request_list = NULL;
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf));
	else if (OCTEON_CN23XX_VF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_vf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		oct->io_qmask.iq &= ~(1ULL << iq_no);
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 0;
	}
	return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vzalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
		    vzalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	if (oct->fn_list.enable_io_queues(oct)) {
		octeon_delete_instr_queue(oct, iq_no);
		return 1;
	}

	return 0;
}

int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending =
			    atomic_read(&oct->instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

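/* Write the number of commands accumulated since the last doorbell ring into
 * the queue's doorbell register so the hardware starts fetching them, then
 * reset the fill count and record when the doorbell was last rung.
 */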
static void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
	}
}

void
octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq;

	iq = oct->instr_queue[iq_no];
	spin_lock(&iq->post_lock);
	if (iq->fill_cnt)
		ring_doorbell(oct, iq);
	spin_unlock(&iq->post_lock);
}

static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

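/* Reserve the next free slot in the ring, copy the command into it and
 * advance host_write_index.  The returned index is where the caller's buffer
 * is later recorded in request_list for completion handling.
 */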
static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = incr_index(iq->host_write_index, 1,
					  iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}

int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}

static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq, u32 napi_budget)
{
	struct cavium_wq *cwq = &oct->dma_comp_wq;
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u64 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	unsigned long flags;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf     = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;
			/* We're expecting a response from Octeon.
			 * It's up to lio_process_ordered_list() to
			 * process sc. Add sc to the ordered soft
			 * command response list because we expect
			 * a response from Octeon.
			 */
			spin_lock_irqsave(&oct->response_list
					  [OCTEON_ORDERED_SC_LIST].lock, flags);
			atomic_inc(&oct->response_list
				   [OCTEON_ORDERED_SC_LIST].pending_req_count);
			list_add_tail(&sc->node, &oct->response_list
				      [OCTEON_ORDERED_SC_LIST].head);
			spin_unlock_irqrestore(&oct->response_list
					       [OCTEON_ORDERED_SC_LIST].lock,
					       flags);
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

 skip_this:
		inst_count++;
		old = incr_index(old, 1, iq->max_count);

		if ((napi_budget) && (inst_count >= napi_budget))
			break;
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	if (atomic_read(&oct->response_list
			[OCTEON_ORDERED_SC_LIST].pending_req_count))
		queue_work(cwq->wq, &cwq->wk.work.work);

	return inst_count;
}

/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 napi_budget)
{
	u32 inst_processed = 0;
	u32 tot_inst_processed = 0;
	int tx_done = 1;

	if (!spin_trylock(&iq->iq_flush_running_lock))
		return tx_done;

	spin_lock_bh(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	do {
		/* Process any outstanding IQ packets. */
		if (iq->flush_index == iq->octeon_read_index)
			break;

		if (napi_budget)
			inst_processed =
				lio_process_iq_request_list(oct, iq,
							    napi_budget -
							    tot_inst_processed);
		else
			inst_processed =
				lio_process_iq_request_list(oct, iq, 0);

		if (inst_processed) {
			iq->pkts_processed += inst_processed;
			atomic_sub(inst_processed, &iq->instr_pending);
			iq->stats.instr_processed += inst_processed;
		}
		tot_inst_processed += inst_processed;
	} while (tot_inst_processed < napi_budget);

	if (napi_budget && (tot_inst_processed >= napi_budget))
		tx_done = 0;

	iq->last_db_time = jiffies;

	spin_unlock_bh(&iq->lock);
	spin_unlock(&iq->iq_flush_running_lock);

	return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;

	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* return immediately, if no work pending */
	if (!atomic_read(&iq->instr_pending))
		return;
	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Flush the instruction queue */
	octeon_flush_iq(oct, iq, 0);

	lio_enable_irq(NULL, iq);
}

/* Called by the Poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	u64 iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
	u32 delay = 10;

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}

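/* Post a single command to an instruction queue: reserve a slot, record the
 * caller's buffer by request type, update the queue statistics, and ring the
 * doorbell when the fill count or the caller requires it.
 */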
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	int xmit_stopped;
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Get the lock and prevent other tasks and tx interrupt handler from
	 * running.
	 */
	if (iq->allow_soft_cmds)
		spin_lock_bh(&iq->post_lock);

	st = __post_command2(iq, cmd);

	if (st.status != IQ_SEND_FAILED) {
		xmit_stopped = octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || force_db ||
		    xmit_stopped || st.status == IQ_SEND_STOP)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	if (iq->allow_soft_cmds)
		spin_unlock_bh(&iq->post_lock);

	/* This is only done here to expedite packets being flushed
	 * for cases where there are no IQ completion interrupts.
	 */

	return st.status;
}

void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	WARN_ON(opcode > 15);
	WARN_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

		pki_ih3->uqpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;

		pki_ih3->tag = LIO_CONTROL;
		pki_ih3->tagtype = ATOMIC_TAG;
		pki_ih3->qpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.ctrl_qpg;

		ih3->dlengsz = sc->datasize;

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd3.ossp[0] = ossp0;
		sc->cmd.cmd3.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
			ih3->fsz = LIO_SOFTCMDRESP_IH3;
		} else {
			irh->rflag = 0;
			/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
			ih3->fsz = LIO_PCICMD_O3;
		}
	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		ih2->tagtype = ATOMIC_TAG;
		ih2->tag = LIO_CONTROL;
		ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

		ih2->dlengsz = sc->datasize;

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd2.ossp[0] = ossp0;
		sc->cmd.cmd2.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
			ih2->fsz = LIO_SOFTCMDRESP_IH2;
		} else {
			irh->rflag = 0;
			/* irh + ossp[0] + ossp[1] = 24 bytes */
			ih2->fsz = LIO_PCICMD_O2;
		}
	}
}

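/* Fill in the DMA addresses for the data and return buffers, initialize the
 * completion status word, stamp the command's expiry time, and post it on its
 * instruction queue with the doorbell forced.
 */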
int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_queue *iq;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	u32 len;

	iq = oct->instr_queue[sc->iq_no];
	if (!iq->allow_soft_cmds) {
		dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
			sc->iq_no);
		INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
		return IQ_SEND_FAILED;
	}

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (u32)ih3->dlengsz;
	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		if (ih2->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd2.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd2.rptr = sc->dmarptr;
		}
		len = (u32)ih2->dlengsz;
	}

	sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);

	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}

int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	struct octeon_soft_command *sc;
	u64 dma_addr;
	int i;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc) {
			octeon_free_sc_buffer_pool(oct);
			return 1;
		}

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

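/* Walk the DONE soft-command list: entries whose caller is finished with them
 * are removed; those whose completion word was never overwritten (a timeout)
 * are moved to the ZOMBIE list, the rest are freed back to the pool.
 */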
int octeon_free_sc_done_list(struct octeon_device *oct)
{
	struct octeon_response_list *done_sc_list, *zombie_sc_list;
	struct octeon_soft_command *sc;
	struct list_head *tmp, *tmp2;
	spinlock_t *sc_lists_lock; /* lock for response_list */

	done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];

	if (!atomic_read(&done_sc_list->pending_req_count))
		return 0;

	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;

	spin_lock_bh(sc_lists_lock);

	list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
		sc = list_entry(tmp, struct octeon_soft_command, node);

		if (READ_ONCE(sc->caller_is_done)) {
			list_del(&sc->node);
			atomic_dec(&done_sc_list->pending_req_count);

			if (*sc->status_word == COMPLETION_WORD_INIT) {
				/* timeout; move sc to zombie list */
				list_add_tail(&sc->node,
					      &zombie_sc_list->head);
				atomic_inc(&zombie_sc_list->pending_req_count);
			} else {
				octeon_free_soft_command(oct, sc);
			}
		}
	}

	spin_unlock_bh(sc_lists_lock);

	return 0;
}

int octeon_free_sc_zombie_list(struct octeon_device *oct)
{
	struct octeon_response_list *zombie_sc_list;
	struct octeon_soft_command *sc;
	struct list_head *tmp, *tmp2;
	spinlock_t *sc_lists_lock; /* lock for response_list */

	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;

	spin_lock_bh(sc_lists_lock);

	list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
		list_del(tmp);
		atomic_dec(&zombie_sc_list->pending_req_count);
		sc = list_entry(tmp, struct octeon_soft_command, node);
		octeon_free_soft_command(oct, sc);
	}

	spin_unlock_bh(sc_lists_lock);

	return 0;
}

int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	octeon_free_sc_zombie_list(oct);

	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);
		sc = (struct octeon_soft_command *)tmp;
		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	return 0;
}

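/* Carve a pre-allocated pool buffer into the soft-command structure itself,
 * an optional context area, and data/return-data regions each aligned to a
 * 128 byte boundary; the last 8 bytes of the return area hold the status word.
 */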
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	WARN_ON((offset + datasize + rdatasize + ctxsize) >
		SOFT_COMMAND_BUFFER_SIZE);

	spin_lock_bh(&oct->sc_buf_pool.lock);
	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock_bh(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;
	list_del(tmp);
	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
	spin_unlock_bh(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;
	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);
	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		WARN_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}

void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);
}