(pfvf->hw.cq_ecount_wait - 1));
}
-dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
{
- dma_addr_t iova;
u8 *buf;
buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
if (unlikely(!buf))
return -ENOMEM;
- iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+ *dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+ if (unlikely(dma_mapping_error(pfvf->dev, *dma))) {
page_frag_free(buf);
return -ENOMEM;
}
- return iova;
+ return 0;
}
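With this change the allocation status and the DMA address travel separately; a minimal caller sketch under the new contract (pool_id and the surrounding function are illustrative, not from this hunk):

	dma_addr_t bufptr;

	/* Returns 0 on success, -ENOMEM if the frag allocation or the
	 * DMA mapping failed; bufptr is only meaningful on success.
	 */
	if (__otx2_alloc_rbuf(pfvf, pool, &bufptr))
		return -ENOMEM;
	otx2_aura_freeptr(pfvf, pool_id, bufptr);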
-static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+static int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
{
- dma_addr_t addr;
+ int ret;
local_bh_disable();
- addr = __otx2_alloc_rbuf(pfvf, pool);
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma);
local_bh_enable();
- return addr;
+ return ret;
}
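Why two entry points: napi_alloc_frag_align() draws from a per-CPU frag cache that expects BH to be disabled, so softirq-context callers can use __otx2_alloc_rbuf() directly while process-context callers go through this wrapper. A sketch of the split (my reading, not stated in the patch):

	dma_addr_t bufptr;
	int err;

	/* Process context (e.g. queue setup): the wrapper toggles BH. */
	err = otx2_alloc_rbuf(pfvf, pool, &bufptr);

	/* NAPI poll, BH already disabled: call the __ variant directly. */
	err = __otx2_alloc_rbuf(pfvf, rbpool, &bufptr);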
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
struct refill_work *wrk;
int qidx, free_ptrs = 0;
struct otx2_nic *pfvf;
- s64 bufptr;
+ dma_addr_t bufptr;
wrk = container_of(work, struct refill_work, pool_refill_work.work);
pfvf = wrk->pf;
free_ptrs = cq->pool_ptrs;
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, rbpool);
- if (bufptr <= 0) {
+ if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
/* Schedule a WQ if we fail to free at least half of the
 * pointers, else enable NAPI for this RQ.
*/
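For reference, the refill loop under the new contract looks roughly like this; the hunk above elides the aura index and the failure bookkeeping, so those parts are assumptions:

	while (cq->pool_ptrs) {
		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
			/* Out of memory: requeue the refill work or
			 * re-enable NAPI, depending on how many of the
			 * free_ptrs were actually refilled.
			 */
			break;
		}
		otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}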
struct otx2_hw *hw = &pfvf->hw;
struct otx2_snd_queue *sq;
struct otx2_pool *pool;
+ dma_addr_t bufptr;
int err, ptr;
- s64 bufptr;
/* Calculate number of SQBs needed.
*
return -ENOMEM;
for (ptr = 0; ptr < num_sqbs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool);
- if (bufptr <= 0)
- return bufptr;
+ if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
+ return -ENOMEM;
otx2_aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
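Note what the old error path did here: it returned bufptr itself, i.e. an address-typed value in place of an errno. The conversion keeps the two spaces apart:

	/* before: the address leaks into the errno space, and a valid
	 * IOVA with bit 63 set reads as negative
	 */
	bufptr = otx2_alloc_rbuf(pfvf, pool);
	if (bufptr <= 0)
		return bufptr;

	/* after: errno stays errno, the address stays dma_addr_t */
	if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
		return -ENOMEM;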
int stack_pages, pool_id, rq;
struct otx2_pool *pool;
int err, ptr, num_ptrs;
- s64 bufptr;
+ dma_addr_t bufptr;
num_ptrs = pfvf->qset.rqe_cnt;
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
for (ptr = 0; ptr < num_ptrs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool);
- if (bufptr <= 0)
- return bufptr;
+ if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
+ return -ENOMEM;
otx2_aura_freeptr(pfvf, pool_id,
bufptr + OTX2_HEAD_ROOM);
}
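The receive pools push bufptr + OTX2_HEAD_ROOM rather than the raw address; my reading, inferred from the identifier and not spelled out here, is that each buffer reserves headroom ahead of the packet data:

	/*
	 * bufptr                      bufptr + OTX2_HEAD_ROOM
	 * |<------- headroom ------->|<------ packet data ------>|
	 *
	 * The pointer handed to the hardware aura skips the headroom.
	 */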
/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
- int aura, s64 buf)
+ int aura, u64 buf)
{
- otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
+ otx2_write128(buf, (u64)aura | BIT_ULL(63),
otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
}
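The s64 -> u64 switch in otx2_aura_freeptr() matters for the same reason as the allocator change: interpreted as signed, a DMA address with bit 63 set (possible with an IOMMU, presumably) compares as negative. Illustrative values, not from the patch:

	dma_addr_t iova = 0x8000000000001000ULL;	/* valid IOVA, MSB set */
	s64 bufptr = (s64)iova;				/* old representation */

	/* The old "<= 0" error check rejects this perfectly valid
	 * address because the sign bit is set.
	 */
	if (bufptr <= 0)
		return -ENOMEM;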
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
-dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool);
+int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);