// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/vdpa.h>
#include <uapi/linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_vnet.h"
#include "mlx5_vdpa_ifc.h"
#include "mlx5_vdpa.h"
#define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev)
#define VALID_FEATURES_MASK                                                                        \
	(BIT_ULL(VIRTIO_NET_F_CSUM) | BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |                           \
	 BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | BIT_ULL(VIRTIO_NET_F_MTU) |                   \
	 BIT_ULL(VIRTIO_NET_F_MAC) | BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |                            \
	 BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) | BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |                      \
	 BIT_ULL(VIRTIO_NET_F_GUEST_UFO) | BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |                       \
	 BIT_ULL(VIRTIO_NET_F_HOST_TSO6) | BIT_ULL(VIRTIO_NET_F_HOST_ECN) |                        \
	 BIT_ULL(VIRTIO_NET_F_HOST_UFO) | BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |                        \
	 BIT_ULL(VIRTIO_NET_F_STATUS) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |                            \
	 BIT_ULL(VIRTIO_NET_F_CTRL_RX) | BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |                         \
	 BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) | BIT_ULL(VIRTIO_NET_F_GUEST_ANNOUNCE) |              \
	 BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |                          \
	 BIT_ULL(VIRTIO_NET_F_HASH_REPORT) | BIT_ULL(VIRTIO_NET_F_RSS) |                           \
	 BIT_ULL(VIRTIO_NET_F_RSC_EXT) | BIT_ULL(VIRTIO_NET_F_STANDBY) |                           \
	 BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX) | BIT_ULL(VIRTIO_F_NOTIFY_ON_EMPTY) |                  \
	 BIT_ULL(VIRTIO_F_ANY_LAYOUT) | BIT_ULL(VIRTIO_F_VERSION_1) |                              \
	 BIT_ULL(VIRTIO_F_ACCESS_PLATFORM) | BIT_ULL(VIRTIO_F_RING_PACKED) |                       \
	 BIT_ULL(VIRTIO_F_ORDER_PLATFORM) | BIT_ULL(VIRTIO_F_SR_IOV))
#define VALID_STATUS_MASK                                                                          \
	(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK |       \
	 VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
struct mlx5_vdpa_net_resources {
	u32 tisn;
	u32 tdn;
	u32 tirn;
	u32 rqtn;
	bool valid;
};

struct mlx5_vdpa_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	int cqe_size;
	int nent;
};

struct mlx5_vdpa_cq {
	struct mlx5_core_cq mcq;
	struct mlx5_vdpa_cq_buf buf;
	struct mlx5_db db;
	int cqe;
};

struct mlx5_vdpa_umem {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	int size;
	u32 id;
};

struct mlx5_vdpa_qp {
	struct mlx5_core_qp mqp;
	struct mlx5_frag_buf frag_buf;
	struct mlx5_db db;
	u16 head;
	bool fw;
};

struct mlx5_vq_restore_info {
	u32 num_ent;
	u64 desc_addr;
	u64 device_addr;
	u64 driver_addr;
	u16 avail_index;
	bool ready;
	struct vdpa_callback cb;
	bool restore;
};

struct mlx5_vdpa_virtqueue {
	bool ready;
	u64 desc_addr;
	u64 device_addr;
	u64 driver_addr;
	u32 num_ent;
	struct vdpa_callback event_cb;

	/* Resources for implementing the notification channel from the device
	 * to the driver. fwqp is the firmware end of an RC connection; the
	 * other end is vqqp used by the driver. cq is where completions are
	 * reported.
	 */
	struct mlx5_vdpa_cq cq;
	struct mlx5_vdpa_qp fwqp;
	struct mlx5_vdpa_qp vqqp;

	/* umem resources are required for the virtqueue operation. Their use
	 * is internal and they must be provided by the driver.
	 */
	struct mlx5_vdpa_umem umem1;
	struct mlx5_vdpa_umem umem2;
	struct mlx5_vdpa_umem umem3;

	bool initialized;
	int index;
	u32 virtq_id;
	struct mlx5_vdpa_net *ndev;
	u16 avail_idx;
	int fw_state;

	/* keep last in the struct */
	struct mlx5_vq_restore_info ri;
};
/* We will remove this limitation once mlx5_vdpa_alloc_resources()
 * provides for driver space allocation
 */
#define MLX5_MAX_SUPPORTED_VQS 16
struct mlx5_vdpa_net {
	struct mlx5_vdpa_dev mvdev;
	struct mlx5_vdpa_net_resources res;
	struct virtio_net_config config;
	struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];

	/* Serialize vq resources creation and destruction. This is required
	 * since memory map might change and we need to destroy and create
	 * resources while the driver is operational.
	 */
	struct mutex reslock;
	struct mlx5_flow_table *rxft;
	struct mlx5_fc *rx_counter;
	struct mlx5_flow_handle *rx_rule;
	bool setup;
};
static void free_resources(struct mlx5_vdpa_net *ndev);
static void init_mvqs(struct mlx5_vdpa_net *ndev);
static int setup_driver(struct mlx5_vdpa_net *ndev);
static void teardown_driver(struct mlx5_vdpa_net *ndev);

static bool mlx5_vdpa_debug;
#define MLX5_LOG_VIO_FLAG(_feature)                                                                \
	do {                                                                                       \
		if (features & BIT_ULL(_feature))                                                  \
			mlx5_vdpa_info(mvdev, "%s\n", #_feature);                                  \
	} while (0)

#define MLX5_LOG_VIO_STAT(_status)                                                                 \
	do {                                                                                       \
		if (status & (_status))                                                            \
			mlx5_vdpa_info(mvdev, "%s\n", #_status);                                   \
	} while (0)
static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set)
{
	if (status & ~VALID_STATUS_MASK)
		mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n",
			       status & ~VALID_STATUS_MASK);

	if (!mlx5_vdpa_debug)
		return;

	mlx5_vdpa_info(mvdev, "driver status %s\n", set ? "set" : "get");
	if (set && !status) {
		mlx5_vdpa_info(mvdev, "driver resets the device\n");
		return;
	}

	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_ACKNOWLEDGE);
	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER);
	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER_OK);
	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FEATURES_OK);
	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_NEEDS_RESET);
	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FAILED);
}
static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set)
{
	if (features & ~VALID_FEATURES_MASK)
		mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n",
			       features & ~VALID_FEATURES_MASK);

	if (!mlx5_vdpa_debug)
		return;

	mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads");
	if (!features)
		mlx5_vdpa_info(mvdev, "all feature bits are cleared\n");

	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CSUM);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_CSUM);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MTU);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MAC);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO4);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO6);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ECN);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_UFO);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO4);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO6);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_ECN);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_UFO);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MRG_RXBUF);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STATUS);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VQ);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VLAN);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX_EXTRA);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ANNOUNCE);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MQ);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_MAC_ADDR);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HASH_REPORT);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSS);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSC_EXT);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STANDBY);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_SPEED_DUPLEX);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_NOTIFY_ON_EMPTY);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_ANY_LAYOUT);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_VERSION_1);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_ACCESS_PLATFORM);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_RING_PACKED);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_ORDER_PLATFORM);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_SR_IOV);
}
static int create_tis(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
	void *tisc;
	int err;

	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn);
	err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn);
	if (err)
		mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err);

	return err;
}

static void destroy_tis(struct mlx5_vdpa_net *ndev)
{
	mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn);
}
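
/* Each virtqueue owns a completion queue. CQEs are fixed at 64 bytes
 * (struct mlx5_cqe64), and the CQ buffer is carved out of a fragmented
 * page allocation addressed through an fbc (frag buffer control).
 */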
#define MLX5_VDPA_CQE_SIZE 64
#define MLX5_VDPA_LOG_CQE_SIZE ilog2(MLX5_VDPA_CQE_SIZE)

static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent)
{
	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
	u8 log_wq_stride = MLX5_VDPA_LOG_CQE_SIZE;
	u8 log_wq_sz = MLX5_VDPA_LOG_CQE_SIZE;
	int err;

	err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf,
				       ndev->mvdev.mdev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);

	buf->cqe_size = MLX5_VDPA_CQE_SIZE;
	buf->nent = nent;

	return 0;
}

static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size)
{
	struct mlx5_frag_buf *frag_buf = &umem->frag_buf;

	return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf,
					ndev->mvdev.mdev->priv.numa_node);
}

static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf)
{
	mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf);
}
static void *get_cqe(struct mlx5_vdpa_cq *vcq, int n)
{
	return mlx5_frag_buf_get_wqe(&vcq->buf.fbc, n);
}

static void cq_frag_buf_init(struct mlx5_vdpa_cq *vcq, struct mlx5_vdpa_cq_buf *buf)
{
	struct mlx5_cqe64 *cqe64;
	void *cqe;
	int i;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe(vcq, i);
		cqe64 = cqe;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

static void *get_sw_cqe(struct mlx5_vdpa_cq *cq, int n)
{
	struct mlx5_cqe64 *cqe64 = get_cqe(cq, n & (cq->cqe - 1));

	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & cq->cqe)))
		return cqe64;

	return NULL;
}
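
/* The driver-side QP has no send queue; the only work it ever does is
 * post receive credits. rx_post() advances the RQ head by n entries and
 * publishes the new head through the doorbell record so the device can
 * keep generating completions.
 */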
static void rx_post(struct mlx5_vdpa_qp *vqp, int n)
{
	vqp->head += n;
	vqp->db.db[0] = cpu_to_be32(vqp->head);
}
static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in,
		       struct mlx5_vdpa_virtqueue *mvq, u32 num_ent)
{
	struct mlx5_vdpa_qp *vqp;
	__be64 *pas;
	void *qpc;

	vqp = fw ? &mvq->fwqp : &mvq->vqqp;
	MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid);
	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	if (vqp->fw) {
		/* Firmware QP is allocated by the driver for the firmware's
		 * use so we can skip part of the params as they will be chosen by firmware
		 */
		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		MLX5_SET(qpc, qpc, no_sq, 1);
		return;
	}

	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
	MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
	MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index);
	MLX5_SET(qpc, qpc, log_page_size, vqp->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(qpc, qpc, no_sq, 1);
	MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn);
	MLX5_SET(qpc, qpc, log_rq_size, ilog2(num_ent));
	MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);

	pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas);
	mlx5_fill_page_frag_array(&vqp->frag_buf, pas);
}
static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent)
{
	return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev,
					num_ent * sizeof(struct mlx5_wqe_data_seg), &vqp->frag_buf,
					ndev->mvdev.mdev->priv.numa_node);
}

static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
{
	mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf);
}
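
/* Create one end of the notification channel. For the driver-side QP this
 * allocates an RQ buffer and a doorbell and, once the QP exists, posts a
 * full ring of receive credits; the firmware-side QP owns no resources in
 * host memory so those steps are skipped.
 */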
static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
		     struct mlx5_vdpa_qp *vqp)
{
	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
	void *qpc;
	void *in;
	int err;

	if (!vqp->fw) {
		vqp = &mvq->vqqp;
		err = rq_buf_alloc(ndev, vqp, mvq->num_ent);
		if (err)
			return err;

		err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db);
		if (err)
			goto err_db;
		inlen += vqp->frag_buf.npages * sizeof(__be64);
	}

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_kzalloc;
	}

	qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent);
	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
	MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
	if (!vqp->fw)
		MLX5_SET64(qpc, qpc, dbr_addr, vqp->db.dma);
	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	kfree(in);
	if (err)
		goto err_kzalloc;

	vqp->mqp.uid = ndev->mvdev.res.uid;
	vqp->mqp.qpn = MLX5_GET(create_qp_out, out, qpn);

	if (!vqp->fw)
		rx_post(vqp, mvq->num_ent);

	return 0;

err_kzalloc:
	if (!vqp->fw)
		mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
err_db:
	if (!vqp->fw)
		rq_buf_free(ndev, vqp);

	return err;
}
static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, vqp->mqp.qpn);
	MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid);
	if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in))
		mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn);
	if (!vqp->fw) {
		mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
		rq_buf_free(ndev, vqp);
	}
}
static void *next_cqe_sw(struct mlx5_vdpa_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq)
{
	struct mlx5_cqe64 *cqe64;

	cqe64 = next_cqe_sw(vcq);
	if (!cqe64)
		return -EAGAIN;

	vcq->mcq.cons_index++;
	return 0;
}
static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
{
	mlx5_cq_set_ci(&mvq->cq.mcq);
	rx_post(&mvq->vqqp, num);
	if (mvq->event_cb.callback)
		mvq->event_cb.callback(mvq->event_cb.private);
}
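
/* CQ completion handler: drain software-owned CQEs, acknowledging them in
 * batches (and reposting RQ credits) so the device is never starved, then
 * forward the event to the virtio driver via the vdpa callback and rearm
 * the CQ for the next interrupt.
 */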
static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq);
	struct mlx5_vdpa_net *ndev = mvq->ndev;
	void __iomem *uar_page = ndev->mvdev.res.uar->map;
	int num = 0;

	while (!mlx5_vdpa_poll_one(&mvq->cq)) {
		num++;
		if (num > mvq->num_ent / 2) {
			/* If completions keep coming while we poll, we want to
			 * let the hardware know that we consumed them by
			 * updating the doorbell record. We also let vdpa core
			 * know about this so it passes it on to the virtio driver
			 */
			mlx5_vdpa_handle_completions(mvq, num);
			num = 0;
		}
	}

	if (num)
		mlx5_vdpa_handle_completions(mvq, num);

	mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
}
static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
{
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
	void __iomem *uar_page = ndev->mvdev.res.uar->map;
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_vdpa_cq *vcq = &mvq->cq;
	unsigned int irqn;
	__be64 *pas;
	int inlen;
	void *cqc;
	void *in;
	int err;
	int eqn;

	err = mlx5_db_alloc(mdev, &vcq->db);
	if (err)
		return err;

	vcq->mcq.set_ci_db = vcq->db.db;
	vcq->mcq.arm_db = vcq->db.db + 1;
	vcq->mcq.cqe_sz = 64;

	err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent);
	if (err)
		goto err_db;

	cq_frag_buf_init(vcq, &vcq->buf);

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * vcq->buf.frag_buf.npages;
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_vzalloc;
	}

	MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid);
	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
	mlx5_fill_page_frag_array(&vcq->buf.frag_buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	/* Use vector 0 by default. Consider adding code to choose least used
	 * vector.
	 */
	err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn);
	if (err)
		goto err_vec;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent));
	MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma);

	err = mlx5_core_create_cq(mdev, &vcq->mcq, in, inlen, out, sizeof(out));
	if (err)
		goto err_vec;

	vcq->mcq.comp = mlx5_vdpa_cq_comp;
	vcq->cqe = num_ent;
	vcq->mcq.set_ci_db = vcq->db.db;
	vcq->mcq.arm_db = vcq->db.db + 1;
	mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
	kfree(in);
	return 0;

err_vec:
	kfree(in);
err_vzalloc:
	cq_frag_buf_free(ndev, &vcq->buf);
err_db:
	mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
	return err;
}
static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
{
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
	struct mlx5_vdpa_cq *vcq = &mvq->cq;

	if (mlx5_core_destroy_cq(mdev, &vcq->mcq)) {
		mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn);
		return;
	}
	cq_frag_buf_free(ndev, &vcq->buf);
	mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
}
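
/* The device advertises two sizing parameters (a, b) per umem; the buffer a
 * queue needs is a * num_ent + b bytes. For example (illustrative numbers
 * only, not taken from any real device), a = 128 and b = 4096 would give a
 * 256-entry queue 128 * 256 + 4096 = 36864 bytes.
 */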
static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
		     struct mlx5_vdpa_umem **umemp)
{
	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
	int p_a;
	int p_b;

	switch (num) {
	case 1:
		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
		*umemp = &mvq->umem1;
		break;
	case 2:
		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
		*umemp = &mvq->umem2;
		break;
	case 3:
		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
		*umemp = &mvq->umem3;
		break;
	}
	return p_a * mvq->num_ent + p_b;
}

static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem)
{
	mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf);
}
static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
{
	int inlen;
	u32 out[MLX5_ST_SZ_DW(create_umem_out)] = {};
	void *um;
	void *in;
	int err;
	__be64 *pas;
	int size;
	struct mlx5_vdpa_umem *umem;

	size = umem_size(ndev, mvq, num, &umem);
	if (size < 0)
		return size;

	umem->size = size;
	err = umem_frag_buf_alloc(ndev, umem, size);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_umem_in) + MLX5_ST_SZ_BYTES(mtt) * umem->frag_buf.npages;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_in;
	}

	MLX5_SET(create_umem_in, in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid);
	um = MLX5_ADDR_OF(create_umem_in, in, umem);
	MLX5_SET(umem, um, log_page_size, umem->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(umem, um, num_of_mtt, umem->frag_buf.npages);

	pas = (__be64 *)MLX5_ADDR_OF(umem, um, mtt[0]);
	mlx5_fill_page_frag_array_perm(&umem->frag_buf, pas, MLX5_MTT_PERM_RW);

	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err);
		goto err_cmd;
	}

	kfree(in);
	umem->id = MLX5_GET(create_umem_out, out, umem_id);

	return 0;

err_cmd:
	kfree(in);
err_in:
	umem_frag_buf_free(ndev, umem);
	return err;
}
static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
{
	u32 in[MLX5_ST_SZ_DW(destroy_umem_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_umem_out)] = {};
	struct mlx5_vdpa_umem *umem;

	switch (num) {
	case 1:
		umem = &mvq->umem1;
		break;
	case 2:
		umem = &mvq->umem2;
		break;
	case 3:
		umem = &mvq->umem3;
		break;
	}

	MLX5_SET(destroy_umem_in, in, opcode, MLX5_CMD_OP_DESTROY_UMEM);
	MLX5_SET(destroy_umem_in, in, umem_id, umem->id);
	if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
		return;

	umem_frag_buf_free(ndev, umem);
}
static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	int num;
	int err;

	for (num = 1; num <= 3; num++) {
		err = create_umem(ndev, mvq, num);
		if (err)
			goto err_umem;
	}
	return 0;

err_umem:
	for (num--; num > 0; num--)
		umem_destroy(ndev, mvq, num);

	return err;
}

static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	int num;

	for (num = 3; num > 0; num--)
		umem_destroy(ndev, mvq, num);
}
static int get_queue_type(struct mlx5_vdpa_net *ndev)
{
	u32 type_mask;

	type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);

	/* prefer split queue */
	if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
		return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;

	WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));

	return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
}
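
/* Virtqueue indices alternate between data directions: even indices are
 * receive queues, odd indices are transmit queues.
 */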
static bool vq_is_tx(u16 idx)
{
	return idx % 2;
}
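
/* The virtio_net_q object stores negotiated feature bits 3..12 in its
 * queue_feature_bit_mask_12_3 field; pack the four offload bits the device
 * consumes into their positions within that field.
 */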
static u16 get_features_12_3(u64 features)
{
	return (!!(features & BIT_ULL(VIRTIO_NET_F_HOST_TSO4)) << 9) |
	       (!!(features & BIT_ULL(VIRTIO_NET_F_HOST_TSO6)) << 8) |
	       (!!(features & BIT_ULL(VIRTIO_NET_F_CSUM)) << 7) |
	       (!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_CSUM)) << 6);
}
static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
	u32 out[MLX5_ST_SZ_DW(create_virtio_net_q_out)] = {};
	void *obj_context;
	void *cmd_hdr;
	void *vq_ctx;
	void *in;
	int err;

	err = umems_create(ndev, mvq);
	if (err)
		return err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_alloc;
	}

	cmd_hdr = MLX5_ADDR_OF(create_virtio_net_q_in, in, general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);

	obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
	MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
	MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
		 get_features_12_3(ndev->mvdev.actual_features));
	vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
	MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev));

	if (vq_is_tx(mvq->index))
		MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn);

	MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE);
	MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index);
	MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
	MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
	MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
		 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
	MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
	MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
	MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
	MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key);
	MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
	MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
	MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
	MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
	MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
	MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
	MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
	if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
		MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);

	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
	if (err)
		goto err_cmd;

	kfree(in);
	mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return 0;

err_cmd:
	kfree(in);
err_alloc:
	umems_destroy(ndev, mvq);
	return err;
}
static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_virtio_net_q_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_virtio_net_q_out)] = {};

	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id);
	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid);
	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_type,
		 MLX5_OBJ_TYPE_VIRTIO_NET_Q);
	if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) {
		mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
		return;
	}
	umems_destroy(ndev, mvq);
}
static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
{
	return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn;
}

static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
{
	return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn;
}
static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out,
			int *outlen, u32 qpn, u32 rqpn)
{
	void *qpc;
	void *pp;

	switch (cmd) {
	case MLX5_CMD_OP_2RST_QP:
		*inlen = MLX5_ST_SZ_BYTES(qp_2rst_in);
		*outlen = MLX5_ST_SZ_BYTES(qp_2rst_out);
		*in = kzalloc(*inlen, GFP_KERNEL);
		*out = kzalloc(*outlen, GFP_KERNEL);
		if (!*in || !*out)
			goto outerr;

		MLX5_SET(qp_2rst_in, *in, opcode, cmd);
		MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid);
		MLX5_SET(qp_2rst_in, *in, qpn, qpn);
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		*inlen = MLX5_ST_SZ_BYTES(rst2init_qp_in);
		*outlen = MLX5_ST_SZ_BYTES(rst2init_qp_out);
		*in = kzalloc(*inlen, GFP_KERNEL);
		*out = kzalloc(*outlen, GFP_KERNEL);
		if (!*in || !*out)
			goto outerr;

		MLX5_SET(rst2init_qp_in, *in, opcode, cmd);
		MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid);
		MLX5_SET(rst2init_qp_in, *in, qpn, qpn);
		qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
		MLX5_SET(qpc, qpc, remote_qpn, rqpn);
		MLX5_SET(qpc, qpc, rwe, 1);
		pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
		MLX5_SET(ads, pp, vhca_port_num, 1);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		*inlen = MLX5_ST_SZ_BYTES(init2rtr_qp_in);
		*outlen = MLX5_ST_SZ_BYTES(init2rtr_qp_out);
		*in = kzalloc(*inlen, GFP_KERNEL);
		*out = kzalloc(*outlen, GFP_KERNEL);
		if (!*in || !*out)
			goto outerr;

		MLX5_SET(init2rtr_qp_in, *in, opcode, cmd);
		MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid);
		MLX5_SET(init2rtr_qp_in, *in, qpn, qpn);
		qpc = MLX5_ADDR_OF(init2rtr_qp_in, *in, qpc);
		MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
		MLX5_SET(qpc, qpc, log_msg_max, 30);
		MLX5_SET(qpc, qpc, remote_qpn, rqpn);
		pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
		MLX5_SET(ads, pp, fl, 1);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		*inlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_in);
		*outlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_out);
		*in = kzalloc(*inlen, GFP_KERNEL);
		*out = kzalloc(*outlen, GFP_KERNEL);
		if (!*in || !*out)
			goto outerr;

		MLX5_SET(rtr2rts_qp_in, *in, opcode, cmd);
		MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid);
		MLX5_SET(rtr2rts_qp_in, *in, qpn, qpn);
		qpc = MLX5_ADDR_OF(rtr2rts_qp_in, *in, qpc);
		pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
		MLX5_SET(ads, pp, ack_timeout, 14);
		MLX5_SET(qpc, qpc, retry_count, 7);
		MLX5_SET(qpc, qpc, rnr_retry, 7);
		break;
	default:
		goto outerr_nullify;
	}

	return;

outerr:
	kfree(*in);
	kfree(*out);
outerr_nullify:
	*in = NULL;
	*out = NULL;
}
static void free_inout(void *in, void *out)
{
	kfree(in);
	kfree(out);
}
/* Two QPs are used by each virtqueue. One is used by the driver and one by
 * firmware. The fw argument indicates whether the subjected QP is the one used
 * by firmware.
 */
static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd)
{
	int outlen;
	int inlen;
	void *out;
	void *in;
	int err;

	alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw));
	if (!in || !out)
		return -ENOMEM;

	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen);
	free_inout(in, out);
	return err;
}
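
/* Walk both QPs through the standard mlx5 RC state ladder
 * (RST -> INIT -> RTR), each side pointing at the other as its remote QP.
 * Only the firmware-side QP is taken all the way to RTS; the driver-side
 * QP was created with no_sq set and never transmits.
 */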
static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	int err;

	err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP);
	if (err)
		return err;

	return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP);
}
struct mlx5_virtq_attr {
	u8 state;
	u16 available_index;
};
static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
			   struct mlx5_virtq_attr *attr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out);
	u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {};
	void *obj_context;
	void *cmd_hdr;
	void *out;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
	if (err)
		goto err_cmd;

	obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context);
	memset(attr, 0, sizeof(*attr));
	attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
	attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
	kfree(out);
	return 0;

err_cmd:
	kfree(out);
	return err;
}
static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
	u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
	void *obj_context;
	void *cmd_hdr;
	void *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);

	obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
	MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select,
		   MLX5_VIRTQ_MODIFY_MASK_STATE);
	MLX5_SET(virtio_net_q_object, obj_context, state, state);
	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
	kfree(in);
	if (!err)
		mvq->fw_state = state;

	return err;
}
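
/* Bring up everything a single virtqueue needs, in dependency order: the
 * CQ, the firmware and driver QPs, the RC connection between them, the
 * virtio_net_q object itself and, if the queue was marked ready, the
 * transition to the RDY state.
 */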
static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	u16 idx = mvq->index;
	int err;

	if (!mvq->num_ent)
		return 0;

	if (mvq->initialized) {
		mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n");
		return -EINVAL;
	}

	err = cq_create(ndev, idx, mvq->num_ent);
	if (err)
		return err;

	err = qp_create(ndev, mvq, &mvq->fwqp);
	if (err)
		goto err_fwqp;

	err = qp_create(ndev, mvq, &mvq->vqqp);
	if (err)
		goto err_vqqp;

	err = connect_qps(ndev, mvq);
	if (err)
		goto err_connect;

	err = create_virtqueue(ndev, mvq);
	if (err)
		goto err_connect;

	if (mvq->ready) {
		err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
		if (err) {
			mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
				       idx, err);
			goto err_connect;
		}
	}

	mvq->initialized = true;
	return 0;

err_connect:
	qp_destroy(ndev, &mvq->vqqp);
err_vqqp:
	qp_destroy(ndev, &mvq->fwqp);
err_fwqp:
	cq_destroy(ndev, idx);
	return err;
}
static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	struct mlx5_virtq_attr attr;

	if (!mvq->initialized)
		return;

	if (query_virtqueue(ndev, mvq, &attr)) {
		mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n");
		return;
	}
	if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
		return;

	if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
		mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");
}

static void suspend_vqs(struct mlx5_vdpa_net *ndev)
{
	int i;

	for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++)
		suspend_vq(ndev, &ndev->vqs[i]);
}
static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	if (!mvq->initialized)
		return;

	suspend_vq(ndev, mvq);
	destroy_virtqueue(ndev, mvq);
	qp_destroy(ndev, &mvq->vqqp);
	qp_destroy(ndev, &mvq->fwqp);
	cq_destroy(ndev, mvq->index);
	mvq->initialized = false;
}
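
/* Build an RQ table over the initialized receive virtqueues so the TIR
 * has an indirection table to spread traffic over; transmit queues are
 * deliberately skipped.
 */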
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
	int log_max_rqt;
	__be32 *list;
	void *rqtc;
	int inlen;
	void *in;
	int i, j;
	int err;

	log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
	if (log_max_rqt < 1)
		return -EOPNOTSUPP;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + (1 << log_max_rqt) * MLX5_ST_SZ_BYTES(rq_num);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid);
	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
	MLX5_SET(rqtc, rqtc, rqt_max_size, 1 << log_max_rqt);
	MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);
	list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
	for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) {
		if (!ndev->vqs[j].initialized)
			continue;

		if (!vq_is_tx(ndev->vqs[j].index)) {
			list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
			i++;
		}
	}

	err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
	kfree(in);
	if (err)
		return err;

	return 0;
}
static void destroy_rqt(struct mlx5_vdpa_net *ndev)
{
	mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn);
}
static int create_tir(struct mlx5_vdpa_net *ndev)
{
#define HASH_IP_L4PORTS                                                                            \
	(MLX5_HASH_FIELD_SEL_SRC_IP | MLX5_HASH_FIELD_SEL_DST_IP | MLX5_HASH_FIELD_SEL_L4_SPORT | \
	 MLX5_HASH_FIELD_SEL_L4_DPORT)
	static const u8 rx_hash_toeplitz_key[] = { 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
						   0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
						   0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
						   0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
						   0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a };
	void *rss_key;
	void *outer;
	void *tirc;
	void *in;
	int err;

	in = kzalloc(MLX5_ST_SZ_BYTES(create_tir_in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid);
	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);

	MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
	rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
	memcpy(rss_key, rx_hash_toeplitz_key, sizeof(rx_hash_toeplitz_key));

	outer = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
	MLX5_SET(rx_hash_field_select, outer, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4);
	MLX5_SET(rx_hash_field_select, outer, l4_prot_type, MLX5_L4_PROT_TYPE_TCP);
	MLX5_SET(rx_hash_field_select, outer, selected_fields, HASH_IP_L4PORTS);

	MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn);
	MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn);

	err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn);
	kfree(in);
	return err;
}

static void destroy_tir(struct mlx5_vdpa_net *ndev)
{
	mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
}
static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_namespace *ns;
	int err;

	/* for now, one entry, match all, forward to tir */
	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;

	ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
	if (!ns) {
		mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n");
		return -EOPNOTSUPP;
	}

	ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ndev->rxft))
		return PTR_ERR(ndev->rxft);

	ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
	if (IS_ERR(ndev->rx_counter)) {
		err = PTR_ERR(ndev->rx_counter);
		goto err_fc;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest[0].tir_num = ndev->res.tirn;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(ndev->rx_counter);
	ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2);
	if (IS_ERR(ndev->rx_rule)) {
		err = PTR_ERR(ndev->rx_rule);
		ndev->rx_rule = NULL;
		goto err_rule;
	}
	return 0;

err_rule:
	mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
err_fc:
	mlx5_destroy_flow_table(ndev->rxft);
	return err;
}
static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
{
	if (!ndev->rx_rule)
		return;

	mlx5_del_flow_rules(ndev->rx_rule);
	mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
	mlx5_destroy_flow_table(ndev->rxft);

	ndev->rx_rule = NULL;
}
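
/* vdpa config ops: the entry points the vdpa core calls on behalf of a
 * bus driver (e.g. vhost-vdpa or virtio-vdpa).
 */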
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];

	if (unlikely(!mvq->ready))
		return;

	iowrite16(idx, ndev->mvdev.res.kick_addr);
}
static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
				    u64 driver_area, u64 device_area)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];

	mvq->desc_addr = desc_area;
	mvq->device_addr = device_area;
	mvq->driver_addr = driver_area;
	return 0;
}
static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq;

	mvq = &ndev->vqs[idx];
	mvq->num_ent = num;
}
static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx];

	vq->event_cb = *cb;
}
static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];

	if (!ready)
		suspend_vq(ndev, mvq);

	mvq->ready = ready;
}
static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];

	return mvq->ready;
}
static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
				  const struct vdpa_vq_state *state)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];

	if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
		mlx5_vdpa_warn(mvdev, "can't modify available index\n");
		return -EINVAL;
	}

	mvq->avail_idx = state->avail_index;
	return 0;
}
static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
	struct mlx5_virtq_attr attr;
	int err;

	if (!mvq->initialized)
		return -EAGAIN;

	err = query_virtqueue(ndev, mvq, &attr);
	if (err) {
		mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
		return err;
	}
	state->avail_index = attr.available_index;
	return 0;
}
static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
{
	return PAGE_SIZE;
}

enum {
	MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
	MLX5_VIRTIO_NET_F_CSUM = 1 << 10,
	MLX5_VIRTIO_NET_F_HOST_TSO6 = 1 << 11,
	MLX5_VIRTIO_NET_F_HOST_TSO4 = 1 << 12,
};
static u64 mlx_to_virtio_features(u16 dev_features)
{
	u64 result = 0;

	if (dev_features & MLX5_VIRTIO_NET_F_GUEST_CSUM)
		result |= BIT_ULL(VIRTIO_NET_F_GUEST_CSUM);
	if (dev_features & MLX5_VIRTIO_NET_F_CSUM)
		result |= BIT_ULL(VIRTIO_NET_F_CSUM);
	if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO6)
		result |= BIT_ULL(VIRTIO_NET_F_HOST_TSO6);
	if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO4)
		result |= BIT_ULL(VIRTIO_NET_F_HOST_TSO4);

	return result;
}
static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	u16 dev_features;

	dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
	ndev->mvdev.mlx_features = mlx_to_virtio_features(dev_features);
	if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
		ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
	ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
	print_features(mvdev, ndev->mvdev.mlx_features, false);
	return ndev->mvdev.mlx_features;
}
static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
{
	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
		return -EOPNOTSUPP;

	return 0;
}
static int setup_virtqueues(struct mlx5_vdpa_net *ndev)
{
	int err;
	int i;

	for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) {
		err = setup_vq(ndev, &ndev->vqs[i]);
		if (err)
			goto err_vq;
	}

	return 0;

err_vq:
	for (--i; i >= 0; i--)
		teardown_vq(ndev, &ndev->vqs[i]);

	return err;
}
static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_virtqueue *mvq;
	int i;

	for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
		mvq = &ndev->vqs[i];
		if (!mvq->initialized)
			continue;

		teardown_vq(ndev, mvq);
	}
}
static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	int err;

	print_features(mvdev, features, true);

	err = verify_min_features(mvdev, features);
	if (err)
		return err;

	ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
	return err;
}
static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
{
	/* not implemented */
	mlx5_vdpa_warn(to_mvdev(vdev), "set config callback not supported\n");
}

#define MLX5_VDPA_MAX_VQ_ENTRIES 256
static u16 mlx5_vdpa_get_vq_num_max(struct vdpa_device *vdev)
{
	return MLX5_VDPA_MAX_VQ_ENTRIES;
}

static u32 mlx5_vdpa_get_device_id(struct vdpa_device *vdev)
{
	return VIRTIO_ID_NET;
}

static u32 mlx5_vdpa_get_vendor_id(struct vdpa_device *vdev)
{
	return PCI_VENDOR_ID_MELLANOX;
}
static u8 mlx5_vdpa_get_status(struct vdpa_device *vdev)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);

	print_status(mvdev, ndev->mvdev.status, false);
	return ndev->mvdev.status;
}
static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	struct mlx5_vq_restore_info *ri = &mvq->ri;
	struct mlx5_virtq_attr attr;
	int err;

	if (!mvq->initialized)
		return 0;

	err = query_virtqueue(ndev, mvq, &attr);
	if (err)
		return err;

	ri->avail_index = attr.available_index;
	ri->ready = mvq->ready;
	ri->num_ent = mvq->num_ent;
	ri->desc_addr = mvq->desc_addr;
	ri->device_addr = mvq->device_addr;
	ri->driver_addr = mvq->driver_addr;
	ri->cb = mvq->event_cb;
	ri->restore = true;
	return 0;
}
static int save_channels_info(struct mlx5_vdpa_net *ndev)
{
	int i;

	for (i = 0; i < ndev->mvdev.max_vqs; i++) {
		memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri));
		save_channel_info(ndev, &ndev->vqs[i]);
	}
	return 0;
}

static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev)
{
	int i;

	for (i = 0; i < ndev->mvdev.max_vqs; i++)
		memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
}
static void restore_channels_info(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_virtqueue *mvq;
	struct mlx5_vq_restore_info *ri;
	int i;

	mlx5_clear_vqs(ndev);
	init_mvqs(ndev);
	for (i = 0; i < ndev->mvdev.max_vqs; i++) {
		mvq = &ndev->vqs[i];
		ri = &mvq->ri;
		if (!ri->restore)
			continue;

		mvq->avail_idx = ri->avail_index;
		mvq->ready = ri->ready;
		mvq->num_ent = ri->num_ent;
		mvq->desc_addr = ri->desc_addr;
		mvq->device_addr = ri->device_addr;
		mvq->driver_addr = ri->driver_addr;
		mvq->event_cb = ri->cb;
	}
}
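
/* A memory map change invalidates every DMA address the device holds.
 * Save per-queue state, tear down the data path, rebuild the memory key
 * from the new iotlb, then restore the saved state and bring the data
 * path back up.
 */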
static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb)
{
	int err;

	suspend_vqs(ndev);
	err = save_channels_info(ndev);
	if (err)
		goto err_mr;

	teardown_driver(ndev);
	mlx5_vdpa_destroy_mr(&ndev->mvdev);
	err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb);
	if (err)
		goto err_mr;

	restore_channels_info(ndev);
	err = setup_driver(ndev);
	if (err)
		goto err_setup;

	return 0;

err_setup:
	mlx5_vdpa_destroy_mr(&ndev->mvdev);
err_mr:
	return err;
}
static int setup_driver(struct mlx5_vdpa_net *ndev)
{
	int err;

	mutex_lock(&ndev->reslock);
	if (ndev->setup) {
		mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n");
		err = 0;
		goto out;
	}
	err = setup_virtqueues(ndev);
	if (err) {
		mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n");
		goto out;
	}

	err = create_rqt(ndev);
	if (err) {
		mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n");
		goto err_rqt;
	}

	err = create_tir(ndev);
	if (err) {
		mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n");
		goto err_tir;
	}

	err = add_fwd_to_tir(ndev);
	if (err) {
		mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n");
		goto err_fwd;
	}
	ndev->setup = true;
	mutex_unlock(&ndev->reslock);

	return 0;

err_fwd:
	destroy_tir(ndev);
err_tir:
	destroy_rqt(ndev);
err_rqt:
	teardown_virtqueues(ndev);
out:
	mutex_unlock(&ndev->reslock);
	return err;
}

static void teardown_driver(struct mlx5_vdpa_net *ndev)
{
	mutex_lock(&ndev->reslock);
	if (!ndev->setup)
		goto out;

	remove_fwd_to_tir(ndev);
	destroy_tir(ndev);
	destroy_rqt(ndev);
	teardown_virtqueues(ndev);
	ndev->setup = false;
out:
	mutex_unlock(&ndev->reslock);
}
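
/* Driver status handling: writing 0 performs a full device reset, and a
 * 0->1 transition of DRIVER_OK builds the whole data path; clearing
 * DRIVER_OK without a reset is not supported.
 */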
static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	int err;

	print_status(mvdev, status, true);
	if (!status) {
		mlx5_vdpa_info(mvdev, "performing device reset\n");
		teardown_driver(ndev);
		mlx5_vdpa_destroy_mr(&ndev->mvdev);
		ndev->mvdev.status = 0;
		ndev->mvdev.mlx_features = 0;
		++mvdev->generation;
		return;
	}

	if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
		if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
			err = setup_driver(ndev);
			if (err) {
				mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
				goto err_setup;
			}
		} else {
			mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
			return;
		}
	}

	ndev->mvdev.status = status;
	return;

err_setup:
	mlx5_vdpa_destroy_mr(&ndev->mvdev);
	ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
}
static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf,
				 unsigned int len)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);

	if (offset + len <= sizeof(struct virtio_net_config))
		memcpy(buf, (u8 *)&ndev->config + offset, len);
}

static void mlx5_vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, const void *buf,
				 unsigned int len)
{
	/* not supported */
}

static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);

	return mvdev->generation;
}
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	bool change_map;
	int err;

	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
	if (err) {
		mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
		return err;
	}

	if (change_map)
		return mlx5_vdpa_change_map(ndev, iotlb);

	return 0;
}
static void mlx5_vdpa_free(struct vdpa_device *vdev)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev;

	ndev = to_mlx5_vdpa_ndev(mvdev);

	free_resources(ndev);
	mlx5_vdpa_free_resources(&ndev->mvdev);
	mutex_destroy(&ndev->reslock);
}
static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
{
	struct vdpa_notification_area ret = {};

	return ret;
}

static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx)
{
	return -EOPNOTSUPP;
}
static const struct vdpa_config_ops mlx5_vdpa_ops = {
	.set_vq_address = mlx5_vdpa_set_vq_address,
	.set_vq_num = mlx5_vdpa_set_vq_num,
	.kick_vq = mlx5_vdpa_kick_vq,
	.set_vq_cb = mlx5_vdpa_set_vq_cb,
	.set_vq_ready = mlx5_vdpa_set_vq_ready,
	.get_vq_ready = mlx5_vdpa_get_vq_ready,
	.set_vq_state = mlx5_vdpa_set_vq_state,
	.get_vq_state = mlx5_vdpa_get_vq_state,
	.get_vq_notification = mlx5_get_vq_notification,
	.get_vq_irq = mlx5_get_vq_irq,
	.get_vq_align = mlx5_vdpa_get_vq_align,
	.get_features = mlx5_vdpa_get_features,
	.set_features = mlx5_vdpa_set_features,
	.set_config_cb = mlx5_vdpa_set_config_cb,
	.get_vq_num_max = mlx5_vdpa_get_vq_num_max,
	.get_device_id = mlx5_vdpa_get_device_id,
	.get_vendor_id = mlx5_vdpa_get_vendor_id,
	.get_status = mlx5_vdpa_get_status,
	.set_status = mlx5_vdpa_set_status,
	.get_config = mlx5_vdpa_get_config,
	.set_config = mlx5_vdpa_set_config,
	.get_generation = mlx5_vdpa_get_generation,
	.set_map = mlx5_vdpa_set_map,
	.free = mlx5_vdpa_free,
};
static int alloc_resources(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_net_resources *res = &ndev->res;
	int err;

	if (res->valid) {
		mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n");
		return -EEXIST;
	}

	err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn);
	if (err)
		return err;

	err = create_tis(ndev);
	if (err)
		goto err_tis;

	res->valid = true;

	return 0;

err_tis:
	mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
	return err;
}

static void free_resources(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_net_resources *res = &ndev->res;

	if (!res->valid)
		return;

	destroy_tis(ndev);
	mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
	res->valid = false;
}
static void init_mvqs(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_virtqueue *mvq;
	int i;

	for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
		mvq = &ndev->vqs[i];
		memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
		mvq->index = i;
		mvq->ndev = ndev;
		mvq->fwqp.fw = true;
	}
	for (; i < ndev->mvdev.max_vqs; i++) {
		mvq = &ndev->vqs[i];
		memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
		mvq->index = i;
		mvq->ndev = ndev;
	}
}
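
/* Probe-time entry point: size the virtqueue array from device
 * capabilities, query MTU and MAC for the virtio config space, allocate
 * device resources and register with the vdpa core.
 */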
void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev)
{
	struct virtio_net_config *config;
	struct mlx5_vdpa_dev *mvdev;
	struct mlx5_vdpa_net *ndev;
	u32 max_vqs;
	int err;

	/* we save one virtqueue for control virtqueue should we require it */
	max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
	max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);

	ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
				 2 * mlx5_vdpa_max_qps(max_vqs));
	if (IS_ERR(ndev))
		return ndev;

	ndev->mvdev.max_vqs = max_vqs;
	mvdev = &ndev->mvdev;
	mvdev->mdev = mdev;
	init_mvqs(ndev);
	mutex_init(&ndev->reslock);
	config = &ndev->config;
	err = mlx5_query_nic_vport_mtu(mdev, &config->mtu);
	if (err)
		goto err_mtu;

	err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
	if (err)
		goto err_mtu;

	mvdev->vdev.dma_dev = mdev->device;
	err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
	if (err)
		goto err_mtu;

	err = alloc_resources(ndev);
	if (err)
		goto err_res;

	err = vdpa_register_device(&mvdev->vdev);
	if (err)
		goto err_reg;

	return ndev;

err_reg:
	free_resources(ndev);
err_res:
	mlx5_vdpa_free_resources(&ndev->mvdev);
err_mtu:
	mutex_destroy(&ndev->reslock);
	put_device(&mvdev->vdev.dev);
	return ERR_PTR(err);
}
void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev)
{
	vdpa_unregister_device(&mvdev->vdev);
}