// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
	struct idxd_dma_chan *idxd_chan;

	idxd_chan = container_of(c, struct idxd_dma_chan, chan);
	return idxd_chan->wq;
}

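/*
 * Translate a completed (or aborted) descriptor into a dmaengine result and
 * invoke the client's callback. If the completion failed because the device
 * reported an invalid interrupt handle, the descriptor may instead be queued
 * for resubmission and is not completed here.
 */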
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type,
			   bool free_desc)
{
	struct idxd_device *idxd = desc->wq->idxd;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_result res;
	int complete = 1;

	if (desc->completion->status == DSA_COMP_SUCCESS) {
		res.result = DMA_TRANS_NOERROR;
	} else if (desc->completion->status) {
		if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT &&
		    desc->completion->status == DSA_COMP_INT_HANDLE_INVAL &&
		    idxd_queue_int_handle_resubmit(desc))
			return;
		res.result = DMA_TRANS_WRITE_FAILED;
	} else if (comp_type == IDXD_COMPLETE_ABORT) {
		res.result = DMA_TRANS_ABORTED;
	} else {
		complete = 0;
	}

	tx = &desc->txd;
	if (complete && tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	if (free_desc)
		idxd_free_desc(desc->wq, desc);
}

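/*
 * Build the DSA descriptor flags: always request a completion record, and
 * also request a completion interrupt when the client passed
 * DMA_PREP_INTERRUPT.
 */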
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
	*desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	if (flags & DMA_PREP_INTERRUPT)
		*desc_flags |= IDXD_OP_FLAG_RCI;
}

static inline void set_completion_address(struct idxd_desc *desc,
					  u64 *compl_addr)
{
	*compl_addr = desc->compl_dma;
}

static inline void idxd_prep_desc_common(struct idxd_wq *wq,
					 struct dsa_hw_desc *hw, char opcode,
					 u64 addr_f1, u64 addr_f2, u64 len,
					 u64 compl, u32 flags)
{
	hw->flags = flags;
	hw->opcode = opcode;
	hw->src_addr = addr_f1;
	hw->dst_addr = addr_f2;
	hw->xfer_size = len;
	/*
	 * For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
	 * field instead. This field should be set to 1 for kernel descriptors.
	 */
	hw->priv = 1;
	hw->completion_addr = compl;
}

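/*
 * device_prep_dma_memcpy callback: allocate a descriptor from the wq and
 * program it as a DSA memory move operation. Returns NULL if the wq is not
 * enabled, the length exceeds the device transfer limit, or descriptor
 * allocation fails.
 */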
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		       dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	if (len > idxd->max_xfer_bytes)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
			      dma_src, dma_dest, len, desc->compl_dma,
			      desc_flags);

	desc->txd.flags = flags;

	return &desc->txd;
}

static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_get(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
	return 0;
}

static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_put(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
}

static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}

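/*
 * Assign the dmaengine cookie and hand the descriptor to the device right
 * away; on submission failure the descriptor is freed and the error code is
 * returned in place of a cookie.
 */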
static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct idxd_wq *wq = to_idxd_wq(c);
	dma_cookie_t cookie;
	int rc;
	struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

	cookie = dma_cookie_assign(tx);

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0) {
		idxd_free_desc(wq, desc);
		return rc;
	}

	return cookie;
}

static void idxd_dma_release(struct dma_device *device)
{
	struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);

	kfree(idxd_dma);
}

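/*
 * Register the idxd device with the dmaengine core. Capabilities are derived
 * from the device: DMA_MEMCPY is only advertised when the operation
 * capability register reports memory move support.
 */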
int idxd_register_dma_device(struct idxd_device *idxd)
{
	struct idxd_dma_dev *idxd_dma;
	struct dma_device *dma;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_dma)
		return -ENOMEM;

	dma = &idxd_dma->dma;
	INIT_LIST_HEAD(&dma->channels);
	dma->dev = dev;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
	dma->device_release = idxd_dma_release;

	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
	}

	dma->device_tx_status = idxd_dma_tx_status;
	dma->device_issue_pending = idxd_dma_issue_pending;
	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
	dma->device_free_chan_resources = idxd_dma_free_chan_resources;

	rc = dma_async_device_register(dma);
	if (rc < 0) {
		kfree(idxd_dma);
		return rc;
	}

	idxd_dma->idxd = idxd;
	/*
	 * This pointer is protected by the refs taken by the dma_chan. It will remain valid
	 * as long as there are outstanding channels.
	 */
	idxd->idxd_dma = idxd_dma;
	return 0;
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
	dma_async_device_unregister(&idxd->idxd_dma->dma);
}

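/*
 * Create one DMA channel per work queue. Each preallocated descriptor on the
 * wq has its tx descriptor initialized against this channel, and a reference
 * on the wq's conf device is held for the lifetime of the channel.
 */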
int idxd_register_dma_channel(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct dma_device *dma = &idxd->idxd_dma->dma;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_dma_chan *idxd_chan;
	struct dma_chan *chan;
	int rc, i;

	idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_chan)
		return -ENOMEM;

	chan = &idxd_chan->chan;
	chan->device = dma;
	list_add_tail(&chan->device_node, &dma->channels);

	for (i = 0; i < wq->num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	rc = dma_async_device_channel_register(dma, chan);
	if (rc < 0) {
		kfree(idxd_chan);
		return rc;
	}

	wq->idxd_chan = idxd_chan;
	idxd_chan->wq = wq;
	get_device(wq_confdev(wq));

	return 0;
}

void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
	struct dma_chan *chan = &idxd_chan->chan;
	struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;

	dma_async_device_channel_unregister(&idxd_dma->dma, chan);
	list_del(&chan->device_node);
	kfree(wq->idxd_chan);
	wq->idxd_chan = NULL;
	put_device(wq_confdev(wq));
}

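/*
 * Driver probe for the dmaengine wq type: mark the wq as a kernel wq, enable
 * it, and register it as a DMA channel. Any failure is unwound so the wq is
 * left disabled with no type.
 */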
static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	mutex_lock(&wq->wq_lock);
	wq->type = IDXD_WQT_KERNEL;

	rc = drv_enable_wq(wq);
	if (rc < 0) {
		dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
		rc = -ENXIO;
		goto err;
	}

	rc = idxd_register_dma_channel(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
		dev_dbg(dev, "Failed to register dma channel\n");
		goto err_dma;
	}

	idxd->cmd_status = 0;
	mutex_unlock(&wq->wq_lock);
	return 0;

err_dma:
	drv_disable_wq(wq);
err:
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
	return rc;
}

static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	mutex_lock(&wq->wq_lock);
	__idxd_wq_quiesce(wq);
	idxd_unregister_dma_channel(wq);
	drv_disable_wq(wq);
	mutex_unlock(&wq->wq_lock);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_dmaengine_drv = {
	.probe = idxd_dmaengine_drv_probe,
	.remove = idxd_dmaengine_drv_remove,
	.name = "dmaengine",
	.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);