// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/module.h>
#include <linux/vdpa.h>
#include <linux/vringh.h>
#include <uapi/linux/virtio_net.h>
#include <uapi/linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/auxiliary_bus.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include "mlx5_vdpa.h"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox VDPA driver");
MODULE_LICENSE("Dual BSD/GPL");

#define to_mlx5_vdpa_ndev(__mvdev) \
	container_of(__mvdev, struct mlx5_vdpa_net, mvdev)
#define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev)

#define VALID_FEATURES_MASK \
	(BIT_ULL(VIRTIO_NET_F_CSUM) | BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) | \
	 BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_MAC) | \
	 BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) | BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) | \
	 BIT_ULL(VIRTIO_NET_F_GUEST_ECN) | BIT_ULL(VIRTIO_NET_F_GUEST_UFO) | BIT_ULL(VIRTIO_NET_F_HOST_TSO4) | \
	 BIT_ULL(VIRTIO_NET_F_HOST_TSO6) | BIT_ULL(VIRTIO_NET_F_HOST_ECN) | BIT_ULL(VIRTIO_NET_F_HOST_UFO) | \
	 BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) | BIT_ULL(VIRTIO_NET_F_STATUS) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ) | \
	 BIT_ULL(VIRTIO_NET_F_CTRL_RX) | BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) | \
	 BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) | BIT_ULL(VIRTIO_NET_F_GUEST_ANNOUNCE) | \
	 BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | BIT_ULL(VIRTIO_NET_F_HASH_REPORT) | \
	 BIT_ULL(VIRTIO_NET_F_RSS) | BIT_ULL(VIRTIO_NET_F_RSC_EXT) | BIT_ULL(VIRTIO_NET_F_STANDBY) | \
	 BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX) | BIT_ULL(VIRTIO_F_NOTIFY_ON_EMPTY) | \
	 BIT_ULL(VIRTIO_F_ANY_LAYOUT) | BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_ACCESS_PLATFORM) | \
	 BIT_ULL(VIRTIO_F_RING_PACKED) | BIT_ULL(VIRTIO_F_ORDER_PLATFORM) | BIT_ULL(VIRTIO_F_SR_IOV))

#define VALID_STATUS_MASK \
	(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \
	 VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
struct mlx5_vdpa_net_resources {
	u32 tisn;
	u32 tdn;
	u32 tirn;
	u32 rqtn;
	bool valid;
};

struct mlx5_vdpa_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	int cqe_size;
	int nent;
};

struct mlx5_vdpa_cq {
	struct mlx5_core_cq mcq;
	struct mlx5_vdpa_cq_buf buf;
	struct mlx5_db db;
	int cqe;
};

struct mlx5_vdpa_umem {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	int size;
	u32 id;
};

struct mlx5_vdpa_qp {
	struct mlx5_core_qp mqp;
	struct mlx5_frag_buf frag_buf;
	struct mlx5_db db;
	u16 head;
	bool fw;
};

struct mlx5_vq_restore_info {
	u32 num_ent;
	u64 desc_addr;
	u64 device_addr;
	u64 driver_addr;
	u16 avail_index;
	bool ready;
	struct vdpa_callback cb;
	bool restore;
};

struct mlx5_vdpa_virtqueue {
	bool ready;
	u64 desc_addr;
	u64 device_addr;
	u64 driver_addr;
	u32 num_ent;
	struct vdpa_callback event_cb;

	/* Resources for implementing the notification channel from the device
	 * to the driver. fwqp is the firmware end of an RC connection; the
	 * other end is vqqp used by the driver. cq is where completions are
	 * reported.
	 */
	struct mlx5_vdpa_cq cq;
	struct mlx5_vdpa_qp fwqp;
	struct mlx5_vdpa_qp vqqp;

	/* umem resources are required for the virtqueue operation. Their use
	 * is internal and they must be provided by the driver.
	 */
	struct mlx5_vdpa_umem umem1;
	struct mlx5_vdpa_umem umem2;
	struct mlx5_vdpa_umem umem3;

	bool initialized;
	int index;
	u32 virtq_id;
	struct mlx5_vdpa_net *ndev;
	u16 avail_idx;
	int fw_state;

	/* keep last in the struct */
	struct mlx5_vq_restore_info ri;
};

/* We will remove this limitation once mlx5_vdpa_alloc_resources()
 * provides for driver space allocation.
 */
#define MLX5_MAX_SUPPORTED_VQS 16

struct mlx5_vdpa_net {
	struct mlx5_vdpa_dev mvdev;
	struct mlx5_vdpa_net_resources res;
	struct virtio_net_config config;
	struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];

	/* Serialize vq resources creation and destruction. This is required
	 * since the memory map might change and we need to destroy and create
	 * resources while the driver is operational.
	 */
	struct mutex reslock;
	struct mlx5_flow_table *rxft;
	struct mlx5_fc *rx_counter;
	struct mlx5_flow_handle *rx_rule;
	bool setup;
	u16 mtu;
};
static void free_resources(struct mlx5_vdpa_net *ndev);
static void init_mvqs(struct mlx5_vdpa_net *ndev);
static int setup_driver(struct mlx5_vdpa_net *ndev);
static void teardown_driver(struct mlx5_vdpa_net *ndev);

static bool mlx5_vdpa_debug;

#define MLX5_LOG_VIO_FLAG(_feature)                                            \
	do {                                                                   \
		if (features & BIT_ULL(_feature))                              \
			mlx5_vdpa_info(mvdev, "%s\n", #_feature);              \
	} while (0)

#define MLX5_LOG_VIO_STAT(_status)                                             \
	do {                                                                   \
		if (status & (_status))                                        \
			mlx5_vdpa_info(mvdev, "%s\n", #_status);               \
	} while (0)

static inline u32 mlx5_vdpa_max_qps(int max_vqs)
{
	return max_vqs / 2;
}
static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set)
{
	if (status & ~VALID_STATUS_MASK)
		mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n",
			       status & ~VALID_STATUS_MASK);

	if (!mlx5_vdpa_debug)
		return;

	mlx5_vdpa_info(mvdev, "driver status %s", set ? "set" : "get");
	if (set && !status) {
		mlx5_vdpa_info(mvdev, "driver resets the device\n");
		return;
	}

	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_ACKNOWLEDGE);
	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER);
	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER_OK);
	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FEATURES_OK);
	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_NEEDS_RESET);
	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FAILED);
}

static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set)
{
	if (features & ~VALID_FEATURES_MASK)
		mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n",
			       features & ~VALID_FEATURES_MASK);

	if (!mlx5_vdpa_debug)
		return;

	mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads");
	if (!features)
		mlx5_vdpa_info(mvdev, "all feature bits are cleared\n");

	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CSUM);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_CSUM);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MTU);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MAC);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO4);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO6);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ECN);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_UFO);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO4);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO6);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_ECN);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_UFO);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MRG_RXBUF);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STATUS);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VQ);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VLAN);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX_EXTRA);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ANNOUNCE);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MQ);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_MAC_ADDR);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HASH_REPORT);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSS);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSC_EXT);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STANDBY);
	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_SPEED_DUPLEX);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_NOTIFY_ON_EMPTY);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_ANY_LAYOUT);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_VERSION_1);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_ACCESS_PLATFORM);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_RING_PACKED);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_ORDER_PLATFORM);
	MLX5_LOG_VIO_FLAG(VIRTIO_F_SR_IOV);
}
static int create_tis(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
	void *tisc;
	int err;

	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn);
	err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn);
	if (err)
		mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err);

	return err;
}

static void destroy_tis(struct mlx5_vdpa_net *ndev)
{
	mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn);
}

#define MLX5_VDPA_CQE_SIZE 64
#define MLX5_VDPA_LOG_CQE_SIZE ilog2(MLX5_VDPA_CQE_SIZE)
static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent)
{
	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
	u8 log_wq_stride = MLX5_VDPA_LOG_CQE_SIZE;
	u8 log_wq_sz = MLX5_VDPA_LOG_CQE_SIZE;
	int err;

	err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf,
				       ndev->mvdev.mdev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);

	buf->cqe_size = MLX5_VDPA_CQE_SIZE;
	buf->nent = nent;

	return 0;
}

static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size)
{
	struct mlx5_frag_buf *frag_buf = &umem->frag_buf;

	return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf,
					ndev->mvdev.mdev->priv.numa_node);
}

static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf)
{
	mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf);
}

static void *get_cqe(struct mlx5_vdpa_cq *vcq, int n)
{
	return mlx5_frag_buf_get_wqe(&vcq->buf.fbc, n);
}
static void cq_frag_buf_init(struct mlx5_vdpa_cq *vcq, struct mlx5_vdpa_cq_buf *buf)
{
	struct mlx5_cqe64 *cqe64;
	void *cqe;
	int i;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe(vcq, i);
		cqe64 = cqe;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

static void *get_sw_cqe(struct mlx5_vdpa_cq *cq, int n)
{
	struct mlx5_cqe64 *cqe64 = get_cqe(cq, n & (cq->cqe - 1));

	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & cq->cqe)))
		return cqe64;

	return NULL;
}

static void rx_post(struct mlx5_vdpa_qp *vqp, int n)
{
	vqp->head += n;
	vqp->db.db[0] = cpu_to_be32(vqp->head);
}
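
/* Prepare the create_qp command for either end of the notification channel.
 * The firmware-owned QP needs only minimal setup (the firmware chooses the
 * rest of the parameters), while the driver-owned QP gets a full RC
 * receive-queue configuration pointing at the virtqueue's CQ.
 */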
static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in,
		       struct mlx5_vdpa_virtqueue *mvq, u32 num_ent)
{
	struct mlx5_vdpa_qp *vqp;
	__be64 *pas;
	void *qpc;

	vqp = fw ? &mvq->fwqp : &mvq->vqqp;
	MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid);
	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	if (vqp->fw) {
		/* Firmware QP is allocated by the driver for the firmware's
		 * use, so we can skip part of the params as they will be
		 * chosen by the firmware.
		 */
		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		MLX5_SET(qpc, qpc, no_sq, 1);
		return;
	}

	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
	MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
	MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index);
	MLX5_SET(qpc, qpc, log_page_size, vqp->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(qpc, qpc, no_sq, 1);
	MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn);
	MLX5_SET(qpc, qpc, log_rq_size, ilog2(num_ent));
	MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
	pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas);
	mlx5_fill_page_frag_array(&vqp->frag_buf, pas);
}
static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent)
{
	return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev,
					num_ent * sizeof(struct mlx5_wqe_data_seg), &vqp->frag_buf,
					ndev->mvdev.mdev->priv.numa_node);
}

static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
{
	mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf);
}

static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
		     struct mlx5_vdpa_qp *vqp)
{
	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
	void *qpc;
	void *in;
	int err;

	if (!vqp->fw) {
		vqp = &mvq->vqqp;
		err = rq_buf_alloc(ndev, vqp, mvq->num_ent);
		if (err)
			return err;

		err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db);
		if (err)
			goto err_db;
		inlen += vqp->frag_buf.npages * sizeof(__be64);
	}

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_kzalloc;
	}

	qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent);
	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
	MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
	if (!vqp->fw)
		MLX5_SET64(qpc, qpc, dbr_addr, vqp->db.dma);
	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	kfree(in);
	if (err)
		goto err_kzalloc;

	vqp->mqp.uid = ndev->mvdev.res.uid;
	vqp->mqp.qpn = MLX5_GET(create_qp_out, out, qpn);

	if (!vqp->fw)
		rx_post(vqp, mvq->num_ent);

	return 0;

err_kzalloc:
	if (!vqp->fw)
		mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
err_db:
	if (!vqp->fw)
		rq_buf_free(ndev, vqp);

	return err;
}
static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, vqp->mqp.qpn);
	MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid);
	if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in))
		mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn);
	if (!vqp->fw) {
		mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
		rq_buf_free(ndev, vqp);
	}
}

static void *next_cqe_sw(struct mlx5_vdpa_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq)
{
	struct mlx5_cqe64 *cqe64;

	cqe64 = next_cqe_sw(vcq);
	if (!cqe64)
		return -EAGAIN;

	vcq->mcq.cons_index++;
	return 0;
}
static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
{
	mlx5_cq_set_ci(&mvq->cq.mcq);

	/* make sure the CQ consumer update is visible to the hardware before
	 * updating the RX doorbell record.
	 */
	dma_wmb();
	rx_post(&mvq->vqqp, num);
	if (mvq->event_cb.callback)
		mvq->event_cb.callback(mvq->event_cb.private);
}

static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq);
	struct mlx5_vdpa_net *ndev = mvq->ndev;
	void __iomem *uar_page = ndev->mvdev.res.uar->map;
	int num = 0;

	while (!mlx5_vdpa_poll_one(&mvq->cq)) {
		num++;
		if (num > mvq->num_ent / 2) {
			/* If completions keep coming while we poll, we want to
			 * let the hardware know that we consumed them by
			 * updating the doorbell record. We also let vdpa core
			 * know about this so it passes it on to the virtio
			 * driver on the guest.
			 */
			mlx5_vdpa_handle_completions(mvq, num);
			num = 0;
		}
	}

	if (num)
		mlx5_vdpa_handle_completions(mvq, num);

	mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
}
static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
{
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
	void __iomem *uar_page = ndev->mvdev.res.uar->map;
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_vdpa_cq *vcq = &mvq->cq;
	unsigned int irqn;
	__be64 *pas;
	int inlen;
	void *cqc;
	void *in;
	int err;
	int eqn;

	err = mlx5_db_alloc(mdev, &vcq->db);
	if (err)
		return err;

	vcq->mcq.set_ci_db = vcq->db.db;
	vcq->mcq.arm_db = vcq->db.db + 1;
	vcq->mcq.cqe_sz = 64;

	err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent);
	if (err)
		goto err_db;

	cq_frag_buf_init(vcq, &vcq->buf);

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * vcq->buf.frag_buf.npages;
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_alloc;
	}

	MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid);
	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
	mlx5_fill_page_frag_array(&vcq->buf.frag_buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	/* Use vector 0 by default. Consider adding code to choose the least
	 * used vector.
	 */
	err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn);
	if (err)
		goto err_vec;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent));
	MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma);

	err = mlx5_core_create_cq(mdev, &vcq->mcq, in, inlen, out, sizeof(out));
	if (err)
		goto err_vec;

	vcq->mcq.comp = mlx5_vdpa_cq_comp;
	vcq->cqe = num_ent;
	vcq->mcq.set_ci_db = vcq->db.db;
	vcq->mcq.arm_db = vcq->db.db + 1;
	mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
	kfree(in);
	return 0;

err_vec:
	kfree(in);
err_alloc:
	cq_frag_buf_free(ndev, &vcq->buf);
err_db:
	mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
	return err;
}
static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
{
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
	struct mlx5_vdpa_cq *vcq = &mvq->cq;

	if (mlx5_core_destroy_cq(mdev, &vcq->mcq)) {
		mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn);
		return;
	}
	cq_frag_buf_free(ndev, &vcq->buf);
	mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
}
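
/* The device reports, per umem, a pair of parameters (a, b); the buffer it
 * requires for a virtqueue with num_ent entries is a * num_ent + b bytes.
 */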
static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
		     struct mlx5_vdpa_umem **umemp)
{
	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
	int p_a;
	int p_b;

	switch (num) {
	case 1:
		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
		*umemp = &mvq->umem1;
		break;
	case 2:
		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
		*umemp = &mvq->umem2;
		break;
	case 3:
		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
		*umemp = &mvq->umem3;
		break;
	}
	return p_a * mvq->num_ent + p_b;
}

static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem)
{
	mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf);
}
static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
{
	u32 out[MLX5_ST_SZ_DW(create_umem_out)] = {};
	struct mlx5_vdpa_umem *umem;
	__be64 *pas;
	int inlen;
	void *um;
	void *in;
	int size;
	int err;

	size = umem_size(ndev, mvq, num, &umem);
	if (size < 0)
		return size;

	umem->size = size;
	err = umem_frag_buf_alloc(ndev, umem, size);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_umem_in) + MLX5_ST_SZ_BYTES(mtt) * umem->frag_buf.npages;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_in;
	}

	MLX5_SET(create_umem_in, in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid);
	um = MLX5_ADDR_OF(create_umem_in, in, umem);
	MLX5_SET(umem, um, log_page_size, umem->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(umem, um, num_of_mtt, umem->frag_buf.npages);

	pas = (__be64 *)MLX5_ADDR_OF(umem, um, mtt[0]);
	mlx5_fill_page_frag_array_perm(&umem->frag_buf, pas, MLX5_MTT_PERM_RW);

	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err);
		goto err_cmd;
	}

	kfree(in);
	umem->id = MLX5_GET(create_umem_out, out, umem_id);

	return 0;

err_cmd:
	kfree(in);
err_in:
	umem_frag_buf_free(ndev, umem);
	return err;
}

static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
{
	u32 in[MLX5_ST_SZ_DW(destroy_umem_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_umem_out)] = {};
	struct mlx5_vdpa_umem *umem;

	switch (num) {
	case 1:
		umem = &mvq->umem1;
		break;
	case 2:
		umem = &mvq->umem2;
		break;
	case 3:
		umem = &mvq->umem3;
		break;
	}

	MLX5_SET(destroy_umem_in, in, opcode, MLX5_CMD_OP_DESTROY_UMEM);
	MLX5_SET(destroy_umem_in, in, umem_id, umem->id);
	if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
		return;

	umem_frag_buf_free(ndev, umem);
}
static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	int num;
	int err;

	for (num = 1; num <= 3; num++) {
		err = create_umem(ndev, mvq, num);
		if (err)
			goto err_umem;
	}
	return 0;

err_umem:
	for (num--; num > 0; num--)
		umem_destroy(ndev, mvq, num);

	return err;
}

static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	int num;

	for (num = 3; num > 0; num--)
		umem_destroy(ndev, mvq, num);
}
static int get_queue_type(struct mlx5_vdpa_net *ndev)
{
	u32 type_mask;

	type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);

	/* prefer split queue */
	if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
		return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;

	WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));

	return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
}
static bool vq_is_tx(u16 idx)
{
	return idx % 2;
}
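
/* Pack the checksum and TSO feature bits negotiated by the driver into the
 * queue_feature_bit_mask_12_3 field of the virtqueue object. The shift
 * values below follow the mlx5_ifc layout of that field.
 */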
static u16 get_features_12_3(u64 features)
{
	return (!!(features & BIT_ULL(VIRTIO_NET_F_HOST_TSO4)) << 9) |
	       (!!(features & BIT_ULL(VIRTIO_NET_F_HOST_TSO6)) << 8) |
	       (!!(features & BIT_ULL(VIRTIO_NET_F_CSUM)) << 7) |
	       (!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_CSUM)) << 6);
}
static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
	u32 out[MLX5_ST_SZ_DW(create_virtio_net_q_out)] = {};
	void *obj_context;
	void *cmd_hdr;
	void *vq_ctx;
	void *in;
	int err;

	err = umems_create(ndev, mvq);
	if (err)
		return err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_alloc;
	}

	cmd_hdr = MLX5_ADDR_OF(create_virtio_net_q_in, in, general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);

	obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
	MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
	MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
		 get_features_12_3(ndev->mvdev.actual_features));
	vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
	MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev));

	if (vq_is_tx(mvq->index))
		MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn);

	MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE);
	MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index);
	MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
	MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
	MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
		 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
	MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
	MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
	MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
	MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key);
	MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
	MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
	MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
	MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
	MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
	MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
	MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
	if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
		MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);

	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
	if (err)
		goto err_cmd;

	kfree(in);
	mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return 0;

err_cmd:
	kfree(in);
err_alloc:
	umems_destroy(ndev, mvq);
	return err;
}
static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_virtio_net_q_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_virtio_net_q_out)] = {};

	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id);
	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid);
	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_type,
		 MLX5_OBJ_TYPE_VIRTIO_NET_Q);
	if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) {
		mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
		return;
	}
	umems_destroy(ndev, mvq);
}
static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
{
	return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn;
}

static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
{
	return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn;
}
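
/* Allocate and fill the command mailboxes for one step of the QP state
 * transitions used to connect the two QPs: 2RST, RST2INIT, INIT2RTR and
 * RTR2RTS. On failure, both *in and *out are set to NULL.
 */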
static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out,
			int *outlen, u32 qpn, u32 rqpn)
{
	void *qpc;
	void *pp;

	switch (cmd) {
	case MLX5_CMD_OP_2RST_QP:
		*inlen = MLX5_ST_SZ_BYTES(qp_2rst_in);
		*outlen = MLX5_ST_SZ_BYTES(qp_2rst_out);
		*in = kzalloc(*inlen, GFP_KERNEL);
		*out = kzalloc(*outlen, GFP_KERNEL);
		if (!*in || !*out)
			goto outerr;

		MLX5_SET(qp_2rst_in, *in, opcode, cmd);
		MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid);
		MLX5_SET(qp_2rst_in, *in, qpn, qpn);
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		*inlen = MLX5_ST_SZ_BYTES(rst2init_qp_in);
		*outlen = MLX5_ST_SZ_BYTES(rst2init_qp_out);
		*in = kzalloc(*inlen, GFP_KERNEL);
		*out = kzalloc(*outlen, GFP_KERNEL);
		if (!*in || !*out)
			goto outerr;

		MLX5_SET(rst2init_qp_in, *in, opcode, cmd);
		MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid);
		MLX5_SET(rst2init_qp_in, *in, qpn, qpn);
		qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
		MLX5_SET(qpc, qpc, remote_qpn, rqpn);
		MLX5_SET(qpc, qpc, rwe, 1);
		pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
		MLX5_SET(ads, pp, vhca_port_num, 1);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		*inlen = MLX5_ST_SZ_BYTES(init2rtr_qp_in);
		*outlen = MLX5_ST_SZ_BYTES(init2rtr_qp_out);
		*in = kzalloc(*inlen, GFP_KERNEL);
		*out = kzalloc(*outlen, GFP_KERNEL);
		if (!*in || !*out)
			goto outerr;

		MLX5_SET(init2rtr_qp_in, *in, opcode, cmd);
		MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid);
		MLX5_SET(init2rtr_qp_in, *in, qpn, qpn);
		qpc = MLX5_ADDR_OF(init2rtr_qp_in, *in, qpc);
		MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
		MLX5_SET(qpc, qpc, log_msg_max, 30);
		MLX5_SET(qpc, qpc, remote_qpn, rqpn);
		pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
		MLX5_SET(ads, pp, fl, 1);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		*inlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_in);
		*outlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_out);
		*in = kzalloc(*inlen, GFP_KERNEL);
		*out = kzalloc(*outlen, GFP_KERNEL);
		if (!*in || !*out)
			goto outerr;

		MLX5_SET(rtr2rts_qp_in, *in, opcode, cmd);
		MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid);
		MLX5_SET(rtr2rts_qp_in, *in, qpn, qpn);
		qpc = MLX5_ADDR_OF(rtr2rts_qp_in, *in, qpc);
		pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
		MLX5_SET(ads, pp, ack_timeout, 14);
		MLX5_SET(qpc, qpc, retry_count, 7);
		MLX5_SET(qpc, qpc, rnr_retry, 7);
		break;
	default:
		goto outerr_nullify;
	}

	return;

outerr:
	kfree(*in);
	kfree(*out);
outerr_nullify:
	*in = NULL;
	*out = NULL;
}

static void free_inout(void *in, void *out)
{
	kfree(in);
	kfree(out);
}
/* Two QPs are used by each virtqueue. One is used by the driver and one by
 * the firmware. The fw argument indicates whether the subjected QP is the one
 * used by the firmware.
 */
static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd)
{
	int outlen;
	int inlen;
	void *out;
	void *in;
	int err;

	alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw));
	if (!in || !out)
		return -ENOMEM;

	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen);
	free_inout(in, out);
	return err;
}
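
/* Walk both QPs through the standard RC state ladder: reset both, move them
 * to INIT and then to RTR, and finally move the firmware QP to RTS so it can
 * send. The driver QP can stay in RTR since it only ever receives.
 */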
static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	int err;

	err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP);
	if (err)
		return err;

	return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP);
}
struct mlx5_virtq_attr {
	u8 state;
	u16 available_index;
};

static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
			   struct mlx5_virtq_attr *attr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out);
	u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {};
	void *obj_context;
	void *cmd_hdr;
	void *out;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
	if (err)
		goto err_cmd;

	obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context);
	memset(attr, 0, sizeof(*attr));
	attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
	attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
	kfree(out);
	return 0;

err_cmd:
	kfree(out);
	return err;
}
static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
	u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
	void *obj_context;
	void *cmd_hdr;
	void *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);

	obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
	MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select,
		   MLX5_VIRTQ_MODIFY_MASK_STATE);
	MLX5_SET(virtio_net_q_object, obj_context, state, state);
	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
	kfree(in);
	if (!err)
		mvq->fw_state = state;

	return err;
}
static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	u16 idx = mvq->index;
	int err;

	if (!mvq->num_ent)
		return 0;

	if (mvq->initialized) {
		mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n");
		return -EINVAL;
	}

	err = cq_create(ndev, idx, mvq->num_ent);
	if (err)
		return err;

	err = qp_create(ndev, mvq, &mvq->fwqp);
	if (err)
		goto err_fwqp;

	err = qp_create(ndev, mvq, &mvq->vqqp);
	if (err)
		goto err_vqqp;

	err = connect_qps(ndev, mvq);
	if (err)
		goto err_connect;

	err = create_virtqueue(ndev, mvq);
	if (err)
		goto err_connect;

	if (mvq->ready) {
		err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
		if (err) {
			mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
				       idx, err);
			goto err_modify;
		}
	}

	mvq->initialized = true;
	return 0;

err_modify:
	destroy_virtqueue(ndev, mvq);
err_connect:
	qp_destroy(ndev, &mvq->vqqp);
err_vqqp:
	qp_destroy(ndev, &mvq->fwqp);
err_fwqp:
	cq_destroy(ndev, idx);
	return err;
}
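
/* Suspending a virtqueue moves the firmware object to the SUSPEND state and
 * snapshots the hardware available index so it can be reported to userspace
 * or restored after a memory map change.
 */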
static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	struct mlx5_virtq_attr attr;

	if (!mvq->initialized)
		return;

	if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
		return;

	if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
		mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");

	if (query_virtqueue(ndev, mvq, &attr)) {
		mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n");
		return;
	}
	mvq->avail_idx = attr.available_index;
}

static void suspend_vqs(struct mlx5_vdpa_net *ndev)
{
	int i;

	for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++)
		suspend_vq(ndev, &ndev->vqs[i]);
}
static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	if (!mvq->initialized)
		return;

	suspend_vq(ndev, mvq);
	destroy_virtqueue(ndev, mvq);
	qp_destroy(ndev, &mvq->vqqp);
	qp_destroy(ndev, &mvq->fwqp);
	cq_destroy(ndev, mvq->index);
	mvq->initialized = false;
}
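
/* The RQT holds the identifiers of the receive virtqueues; the TIR created
 * below spreads incoming traffic over its entries.
 */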
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
	int log_max_rqt;
	__be32 *list;
	void *rqtc;
	int inlen;
	void *in;
	int i, j;
	int err;

	log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
	if (log_max_rqt < 1)
		return -EOPNOTSUPP;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + (1 << log_max_rqt) * MLX5_ST_SZ_BYTES(rq_num);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid);
	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
	MLX5_SET(rqtc, rqtc, rqt_max_size, 1 << log_max_rqt);
	MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);
	list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
	for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) {
		if (!ndev->vqs[j].initialized)
			continue;

		if (!vq_is_tx(ndev->vqs[j].index)) {
			list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
			i++;
		}
	}

	err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
	kfree(in);
	if (err)
		return err;

	return 0;
}

static void destroy_rqt(struct mlx5_vdpa_net *ndev)
{
	mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn);
}
static int create_tir(struct mlx5_vdpa_net *ndev)
{
#define HASH_IP_L4PORTS                                                                            \
	(MLX5_HASH_FIELD_SEL_SRC_IP | MLX5_HASH_FIELD_SEL_DST_IP | MLX5_HASH_FIELD_SEL_L4_SPORT | \
	 MLX5_HASH_FIELD_SEL_L4_DPORT)
	static const u8 rx_hash_toeplitz_key[] = { 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
						   0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
						   0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
						   0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
						   0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a };
	void *rss_key;
	void *outer;
	void *tirc;
	void *in;
	int err;

	in = kzalloc(MLX5_ST_SZ_BYTES(create_tir_in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid);
	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);

	MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
	rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
	memcpy(rss_key, rx_hash_toeplitz_key, sizeof(rx_hash_toeplitz_key));

	outer = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
	MLX5_SET(rx_hash_field_select, outer, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4);
	MLX5_SET(rx_hash_field_select, outer, l4_prot_type, MLX5_L4_PROT_TYPE_TCP);
	MLX5_SET(rx_hash_field_select, outer, selected_fields, HASH_IP_L4PORTS);

	MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn);
	MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn);

	err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn);
	kfree(in);
	return err;
}

static void destroy_tir(struct mlx5_vdpa_net *ndev)
{
	mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
}
static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_namespace *ns;
	int err;

	/* for now, one entry, match all, forward to tir */
	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;

	ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
	if (!ns) {
		mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n");
		return -EOPNOTSUPP;
	}

	ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ndev->rxft))
		return PTR_ERR(ndev->rxft);

	ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
	if (IS_ERR(ndev->rx_counter)) {
		err = PTR_ERR(ndev->rx_counter);
		goto err_fc;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest[0].tir_num = ndev->res.tirn;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(ndev->rx_counter);
	ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2);
	if (IS_ERR(ndev->rx_rule)) {
		err = PTR_ERR(ndev->rx_rule);
		ndev->rx_rule = NULL;
		goto err_rule;
	}
	return 0;

err_rule:
	mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
err_fc:
	mlx5_destroy_flow_table(ndev->rxft);
	return err;
}

static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
{
	if (!ndev->rx_rule)
		return;

	mlx5_del_flow_rules(ndev->rx_rule);
	mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
	mlx5_destroy_flow_table(ndev->rxft);

	ndev->rx_rule = NULL;
}
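
/* A guest kick is translated to a doorbell write of the queue index to the
 * device's kick address.
 */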
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];

	if (unlikely(!mvq->ready))
		return;

	iowrite16(idx, ndev->mvdev.res.kick_addr);
}

static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
				    u64 driver_area, u64 device_area)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];

	mvq->desc_addr = desc_area;
	mvq->device_addr = device_area;
	mvq->driver_addr = driver_area;
	return 0;
}

static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq;

	mvq = &ndev->vqs[idx];
	mvq->num_ent = num;
}

static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx];

	vq->event_cb = *cb;
}

static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];

	if (!ready)
		suspend_vq(ndev, mvq);

	mvq->ready = ready;
}

static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];

	return mvq->ready;
}
static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
				  const struct vdpa_vq_state *state)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];

	if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
		mlx5_vdpa_warn(mvdev, "can't modify available index\n");
		return -EINVAL;
	}

	mvq->avail_idx = state->avail_index;
	return 0;
}

static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
	struct mlx5_virtq_attr attr;
	int err;

	/* If the virtq object was destroyed, use the value saved at
	 * the last minute of suspend_vq. This caters for userspace
	 * that cares about emulating the index after the vq is stopped.
	 */
	if (!mvq->initialized) {
		state->avail_index = mvq->avail_idx;
		return 0;
	}

	err = query_virtqueue(ndev, mvq, &attr);
	if (err) {
		mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
		return err;
	}
	state->avail_index = attr.available_index;
	return 0;
}

static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
{
	return PAGE_SIZE;
}
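
/* Feature bit positions as reported in the device_features_bits_mask field
 * of the VDPA emulation capabilities.
 */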
enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
       MLX5_VIRTIO_NET_F_CSUM = 1 << 10,
       MLX5_VIRTIO_NET_F_HOST_TSO6 = 1 << 11,
       MLX5_VIRTIO_NET_F_HOST_TSO4 = 1 << 12,
};

static u64 mlx_to_virtio_features(u16 dev_features)
{
	u64 result = 0;

	if (dev_features & MLX5_VIRTIO_NET_F_GUEST_CSUM)
		result |= BIT_ULL(VIRTIO_NET_F_GUEST_CSUM);
	if (dev_features & MLX5_VIRTIO_NET_F_CSUM)
		result |= BIT_ULL(VIRTIO_NET_F_CSUM);
	if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO6)
		result |= BIT_ULL(VIRTIO_NET_F_HOST_TSO6);
	if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO4)
		result |= BIT_ULL(VIRTIO_NET_F_HOST_TSO4);

	return result;
}
static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	u16 dev_features;

	dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
	ndev->mvdev.mlx_features = mlx_to_virtio_features(dev_features);
	if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
		ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
	ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
	print_features(mvdev, ndev->mvdev.mlx_features, false);
	return ndev->mvdev.mlx_features;
}

static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
{
	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
		return -EOPNOTSUPP;

	return 0;
}

static int setup_virtqueues(struct mlx5_vdpa_net *ndev)
{
	int err;
	int i;

	for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) {
		err = setup_vq(ndev, &ndev->vqs[i]);
		if (err)
			goto err_vq;
	}

	return 0;

err_vq:
	for (--i; i >= 0; i--)
		teardown_vq(ndev, &ndev->vqs[i]);

	return err;
}

static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_virtqueue *mvq;
	int i;

	for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
		mvq = &ndev->vqs[i];
		if (!mvq->initialized)
			continue;

		teardown_vq(ndev, mvq);
	}
}

/* TODO: cross-endian support */
static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
{
	return virtio_legacy_is_little_endian() ||
		(mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
}

static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
{
	return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
}
static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	int err;

	print_features(mvdev, features, true);

	err = verify_min_features(mvdev, features);
	if (err)
		return err;

	ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
	ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
	ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
	return err;
}

static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
{
	/* not implemented */
	mlx5_vdpa_warn(to_mvdev(vdev), "set config callback not supported\n");
}
#define MLX5_VDPA_MAX_VQ_ENTRIES 256
static u16 mlx5_vdpa_get_vq_num_max(struct vdpa_device *vdev)
{
	return MLX5_VDPA_MAX_VQ_ENTRIES;
}

static u32 mlx5_vdpa_get_device_id(struct vdpa_device *vdev)
{
	return VIRTIO_ID_NET;
}

static u32 mlx5_vdpa_get_vendor_id(struct vdpa_device *vdev)
{
	return PCI_VENDOR_ID_MELLANOX;
}

static u8 mlx5_vdpa_get_status(struct vdpa_device *vdev)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);

	print_status(mvdev, ndev->mvdev.status, false);
	return ndev->mvdev.status;
}
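
/* Save enough state of an initialized virtqueue into its restore_info so an
 * equivalent virtqueue object can be recreated after its resources are torn
 * down, e.g. when the memory map changes.
 */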
static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	struct mlx5_vq_restore_info *ri = &mvq->ri;
	struct mlx5_virtq_attr attr;
	int err;

	if (!mvq->initialized)
		return 0;

	err = query_virtqueue(ndev, mvq, &attr);
	if (err)
		return err;

	ri->avail_index = attr.available_index;
	ri->ready = mvq->ready;
	ri->num_ent = mvq->num_ent;
	ri->desc_addr = mvq->desc_addr;
	ri->device_addr = mvq->device_addr;
	ri->driver_addr = mvq->driver_addr;
	ri->cb = mvq->event_cb;
	ri->restore = true;
	return 0;
}

static int save_channels_info(struct mlx5_vdpa_net *ndev)
{
	int i;

	for (i = 0; i < ndev->mvdev.max_vqs; i++) {
		memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri));
		save_channel_info(ndev, &ndev->vqs[i]);
	}
	return 0;
}

static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev)
{
	int i;

	for (i = 0; i < ndev->mvdev.max_vqs; i++)
		memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
}
static void restore_channels_info(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_virtqueue *mvq;
	struct mlx5_vq_restore_info *ri;
	int i;

	mlx5_clear_vqs(ndev);
	init_mvqs(ndev);
	for (i = 0; i < ndev->mvdev.max_vqs; i++) {
		mvq = &ndev->vqs[i];
		ri = &mvq->ri;
		if (!ri->restore)
			continue;

		mvq->avail_idx = ri->avail_index;
		mvq->ready = ri->ready;
		mvq->num_ent = ri->num_ent;
		mvq->desc_addr = ri->desc_addr;
		mvq->device_addr = ri->device_addr;
		mvq->driver_addr = ri->driver_addr;
		mvq->event_cb = ri->cb;
	}
}
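
/* Changing the memory map requires recreating the MR: suspend the queues and
 * save their state, tear everything down, rebuild the MR over the new iotlb,
 * and, if the device is live (DRIVER_OK), restore the queues and set up the
 * driver resources again.
 */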
static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb)
{
	int err;

	suspend_vqs(ndev);
	err = save_channels_info(ndev);
	if (err)
		goto err_mr;

	teardown_driver(ndev);
	mlx5_vdpa_destroy_mr(&ndev->mvdev);
	err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb);
	if (err)
		goto err_mr;

	if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
		return 0;

	restore_channels_info(ndev);
	err = setup_driver(ndev);
	if (err)
		goto err_setup;

	return 0;

err_setup:
	mlx5_vdpa_destroy_mr(&ndev->mvdev);
err_mr:
	return err;
}
static int setup_driver(struct mlx5_vdpa_net *ndev)
{
	int err;

	mutex_lock(&ndev->reslock);
	if (ndev->setup) {
		mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n");
		err = 0;
		goto out;
	}
	err = setup_virtqueues(ndev);
	if (err) {
		mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n");
		goto out;
	}

	err = create_rqt(ndev);
	if (err) {
		mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n");
		goto err_rqt;
	}

	err = create_tir(ndev);
	if (err) {
		mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n");
		goto err_tir;
	}

	err = add_fwd_to_tir(ndev);
	if (err) {
		mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n");
		goto err_fwd;
	}
	ndev->setup = true;
	mutex_unlock(&ndev->reslock);

	return 0;

err_fwd:
	destroy_tir(ndev);
err_tir:
	destroy_rqt(ndev);
err_rqt:
	teardown_virtqueues(ndev);
out:
	mutex_unlock(&ndev->reslock);
	return err;
}

static void teardown_driver(struct mlx5_vdpa_net *ndev)
{
	mutex_lock(&ndev->reslock);
	if (!ndev->setup)
		goto out;

	remove_fwd_to_tir(ndev);
	destroy_tir(ndev);
	destroy_rqt(ndev);
	teardown_virtqueues(ndev);
	ndev->setup = false;
out:
	mutex_unlock(&ndev->reslock);
}
static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	int err;

	print_status(mvdev, status, true);
	if (!status) {
		mlx5_vdpa_info(mvdev, "performing device reset\n");
		teardown_driver(ndev);
		mlx5_vdpa_destroy_mr(&ndev->mvdev);
		ndev->mvdev.status = 0;
		ndev->mvdev.mlx_features = 0;
		++mvdev->generation;
		return;
	}

	if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
		if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
			err = setup_driver(ndev);
			if (err) {
				mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
				goto err_setup;
			}
		} else {
			mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
			return;
		}
	}

	ndev->mvdev.status = status;
	return;

err_setup:
	mlx5_vdpa_destroy_mr(&ndev->mvdev);
	ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
}
static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf,
				 unsigned int len)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);

	if (offset + len <= sizeof(struct virtio_net_config))
		memcpy(buf, (u8 *)&ndev->config + offset, len);
}

static void mlx5_vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, const void *buf,
				 unsigned int len)
{
	/* not supported */
}

static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);

	return mvdev->generation;
}
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	bool change_map;
	int err;

	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
	if (err) {
		mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
		return err;
	}

	if (change_map)
		return mlx5_vdpa_change_map(ndev, iotlb);

	return 0;
}

static void mlx5_vdpa_free(struct vdpa_device *vdev)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev;

	ndev = to_mlx5_vdpa_ndev(mvdev);

	free_resources(ndev);
	mlx5_vdpa_free_resources(&ndev->mvdev);
	mutex_destroy(&ndev->reslock);
}
static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
{
	struct vdpa_notification_area ret = {};

	return ret;
}

static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx)
{
	return -EOPNOTSUPP;
}

static const struct vdpa_config_ops mlx5_vdpa_ops = {
	.set_vq_address = mlx5_vdpa_set_vq_address,
	.set_vq_num = mlx5_vdpa_set_vq_num,
	.kick_vq = mlx5_vdpa_kick_vq,
	.set_vq_cb = mlx5_vdpa_set_vq_cb,
	.set_vq_ready = mlx5_vdpa_set_vq_ready,
	.get_vq_ready = mlx5_vdpa_get_vq_ready,
	.set_vq_state = mlx5_vdpa_set_vq_state,
	.get_vq_state = mlx5_vdpa_get_vq_state,
	.get_vq_notification = mlx5_get_vq_notification,
	.get_vq_irq = mlx5_get_vq_irq,
	.get_vq_align = mlx5_vdpa_get_vq_align,
	.get_features = mlx5_vdpa_get_features,
	.set_features = mlx5_vdpa_set_features,
	.set_config_cb = mlx5_vdpa_set_config_cb,
	.get_vq_num_max = mlx5_vdpa_get_vq_num_max,
	.get_device_id = mlx5_vdpa_get_device_id,
	.get_vendor_id = mlx5_vdpa_get_vendor_id,
	.get_status = mlx5_vdpa_get_status,
	.set_status = mlx5_vdpa_set_status,
	.get_config = mlx5_vdpa_get_config,
	.set_config = mlx5_vdpa_set_config,
	.get_generation = mlx5_vdpa_get_generation,
	.set_map = mlx5_vdpa_set_map,
	.free = mlx5_vdpa_free,
};
static int alloc_resources(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_net_resources *res = &ndev->res;
	int err;

	if (res->valid) {
		mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n");
		return -EEXIST;
	}

	err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn);
	if (err)
		return err;

	err = create_tis(ndev);
	if (err)
		goto err_tis;

	res->valid = true;

	return 0;

err_tis:
	mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
	return err;
}

static void free_resources(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_net_resources *res = &ndev->res;

	if (!res->valid)
		return;

	destroy_tis(ndev);
	mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
	res->valid = false;
}

static void init_mvqs(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_virtqueue *mvq;
	int i;

	for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
		mvq = &ndev->vqs[i];
		memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
		mvq->index = i;
		mvq->ndev = ndev;
		mvq->fwqp.fw = true;
	}
	for (; i < ndev->mvdev.max_vqs; i++) {
		mvq = &ndev->vqs[i];
		memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
		mvq->index = i;
		mvq->ndev = ndev;
	}
}
static int mlx5v_probe(struct auxiliary_device *adev,
		       const struct auxiliary_device_id *id)
{
	struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = madev->mdev;
	struct virtio_net_config *config;
	struct mlx5_vdpa_dev *mvdev;
	struct mlx5_vdpa_net *ndev;
	u32 max_vqs;
	int err;

	/* we save one virtqueue for the control virtqueue should we require it */
	max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
	max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);

	ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
				 2 * mlx5_vdpa_max_qps(max_vqs));
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);

	ndev->mvdev.max_vqs = max_vqs;
	mvdev = &ndev->mvdev;
	mvdev->mdev = mdev;
	init_mvqs(ndev);
	mutex_init(&ndev->reslock);
	config = &ndev->config;
	err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu);
	if (err)
		goto err_mtu;

	err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
	if (err)
		goto err_mtu;

	mvdev->vdev.dma_dev = mdev->device;
	err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
	if (err)
		goto err_mtu;

	err = alloc_resources(ndev);
	if (err)
		goto err_res;

	err = vdpa_register_device(&mvdev->vdev);
	if (err)
		goto err_reg;

	dev_set_drvdata(&adev->dev, ndev);
	return 0;

err_reg:
	free_resources(ndev);
err_res:
	mlx5_vdpa_free_resources(&ndev->mvdev);
err_mtu:
	mutex_destroy(&ndev->reslock);
	put_device(&mvdev->vdev.dev);
	return err;
}

static void mlx5v_remove(struct auxiliary_device *adev)
{
	struct mlx5_vdpa_dev *mvdev = dev_get_drvdata(&adev->dev);

	vdpa_unregister_device(&mvdev->vdev);
}

static const struct auxiliary_device_id mlx5v_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".vnet", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5v_id_table);

static struct auxiliary_driver mlx5v_driver = {
	.name = "vnet",
	.probe = mlx5v_probe,
	.remove = mlx5v_remove,
	.id_table = mlx5v_id_table,
};

module_auxiliary_driver(mlx5v_driver);