// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/vdpa.h>
#include <linux/gcd.h>
#include <linux/string.h>
#include <linux/mlx5/qp.h>
#include "mlx5_vdpa.h"

/* DIV_ROUND_UP where the divisor is a power of 2 given by its log base 2 value */
#define MLX5_DIV_ROUND_UP_POW2(_n, _s) \
({ \
        u64 __s = _s; \
        u64 _res; \
        _res = (((_n) + (1 << (__s)) - 1) >> (__s)); \
        _res; \
})

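/*
 * A worked example: MLX5_DIV_ROUND_UP_POW2(8193, 12) evaluates to
 * (8193 + 4095) >> 12 = 3, i.e. three 4K pages are needed to cover
 * 8193 bytes.
 */
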
static int get_octo_len(u64 len, int page_shift)
{
        u64 page_size = 1ULL << page_shift;
        int npages;

        npages = ALIGN(len, page_size) >> page_shift;
        return (npages + 1) / 2;
}

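/*
 * Each MTT translation entry is 8 bytes, so one 16-byte octword holds
 * two entries. For example, a 64K region with 4K pages needs 16 MTT
 * entries, i.e. (16 + 1) / 2 = 8 octwords.
 */
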
static void mlx5_set_access_mode(void *mkc, int mode)
{
        MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
        MLX5_SET(mkc, mkc, access_mode_4_2, mode >> 2);
}

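/*
 * The mkey context encodes its 5-bit access mode in two separate
 * fields: the low two bits in access_mode_1_0 and the rest in
 * access_mode_4_2. For MLX5_MKC_ACCESS_MODE_KLMS (0x2) this writes 2
 * and 0 respectively.
 */
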
static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
        struct scatterlist *sg;
        int nsg = mr->nsg;
        u64 dma_addr;
        u64 dma_len;
        int j = 0;
        int i;

        for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
                /* Break each DMA-mapped entry into 2^log_size sized translations */
                for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
                     nsg && dma_len;
                     nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
                        mtt[j++] = cpu_to_be64(dma_addr);
        }
}

static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
        int inlen;
        void *mkc;
        void *in;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
        MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
        MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
        mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_MTT);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
        MLX5_SET64(mkc, mkc, start_addr, mr->offset);
        MLX5_SET64(mkc, mkc, len, mr->end - mr->start);
        MLX5_SET(mkc, mkc, log_page_size, mr->log_size);
        MLX5_SET(mkc, mkc, translations_octword_size,
                 get_octo_len(mr->end - mr->start, mr->log_size));
        MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
                 get_octo_len(mr->end - mr->start, mr->log_size));
        populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
        err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
        kvfree(in);
        if (err) {
                mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
                return err;
        }

        return 0;
}

static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
        mlx5_vdpa_destroy_mkey(mvdev, &mr->mr);
}

static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
        return max_t(u64, map->start, mr->start);
}

static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
        return min_t(u64, map->last + 1, mr->end);
}

static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
        return map_end(map, mr) - map_start(map, mr);
}

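/*
 * Example: for a map covering [0x0, 0x4fff] and a direct MR covering
 * [0x3000, 0x8000), maplen() returns min(0x5000, 0x8000) -
 * max(0x0, 0x3000) = 0x2000, the length of the overlap.
 */
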
#define MLX5_VDPA_INVALID_START_ADDR ((u64)-1)
#define MLX5_VDPA_INVALID_LEN ((u64)-1)

static u64 indir_start_addr(struct mlx5_vdpa_mr *mkey)
{
        struct mlx5_vdpa_direct_mr *s;

        s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
        if (!s)
                return MLX5_VDPA_INVALID_START_ADDR;

        return s->start;
}

static u64 indir_len(struct mlx5_vdpa_mr *mkey)
{
        struct mlx5_vdpa_direct_mr *s;
        struct mlx5_vdpa_direct_mr *e;

        s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
        if (!s)
                return MLX5_VDPA_INVALID_LEN;

        e = list_last_entry(&mkey->head, struct mlx5_vdpa_direct_mr, list);

        return e->end - s->start;
}

#define LOG_MAX_KLM_SIZE 30
#define MAX_KLM_SIZE BIT(LOG_MAX_KLM_SIZE)

static u32 klm_bcount(u64 size)
{
        return (u32)size;
}

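/*
 * The cast is safe for direct MRs because add_direct_chain() caps each
 * one at MAX_KLM_SIZE (1 GB), so a direct MR's byte count always fits
 * the KLM entry's 32-bit byte_count field.
 */
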
static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, void *in)
{
        struct mlx5_vdpa_direct_mr *dmr;
        struct mlx5_klm *klmarr;
        struct mlx5_klm *klm;
        bool first = true;
        u64 preve;
        int i;

        klmarr = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
        i = 0;
        list_for_each_entry(dmr, &mkey->head, list) {
again:
                klm = &klmarr[i++];
                if (first) {
                        preve = dmr->start;
                        first = false;
                }

                if (preve == dmr->start) {
                        klm->key = cpu_to_be32(dmr->mr.key);
                        klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
                        preve = dmr->end;
                } else {
                        /* A hole before this direct MR: cover it with the null mkey */
                        klm->key = cpu_to_be32(mvdev->res.null_mkey);
                        klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
                        preve = dmr->start;
                        goto again;
                }
        }
}

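/*
 * Example: direct MRs covering [0x0, 0x40000000) and [0x80000000,
 * 0xc0000000) yield three KLM entries: the first direct mkey, a
 * null-mkey entry spanning the 1 GB hole, and the second direct mkey.
 */
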
static int klm_byte_size(int nklms)
{
        return 16 * ALIGN(nklms, 4);
}

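/*
 * Each KLM entry is 16 bytes and the array is padded to a multiple of
 * four entries, so e.g. klm_byte_size(5) = 16 * 8 = 128 bytes.
 */
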
static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
        int inlen;
        void *mkc;
        void *in;
        int err;
        u64 start;
        u64 len;

        start = indir_start_addr(mr);
        len = indir_len(mr);
        if (start == MLX5_VDPA_INVALID_START_ADDR || len == MLX5_VDPA_INVALID_LEN)
                return -EINVAL;

        inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms);
        in = kzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
        MLX5_SET(mkc, mkc, lw, 1);
        MLX5_SET(mkc, mkc, lr, 1);
        mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_KLMS);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
        MLX5_SET64(mkc, mkc, start_addr, start);
        MLX5_SET64(mkc, mkc, len, len);
        MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16);
        MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms);
        fill_indir(mvdev, mr, in);
        err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
        kfree(in);
        return err;
}

static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey)
{
        mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
}

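/*
 * map_direct_mr() picks one "page" size for the whole direct MR: the
 * gcd of all map lengths intersecting it. For example, maps of 12 KB
 * and 8 KB give a gcd of 4 KB, so the MR is built from 4 KB entries
 * (log_entity_size = 12) that evenly divide every map.
 */
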
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
                         struct vhost_iotlb *iotlb)
{
        struct vhost_iotlb_map *map;
        unsigned long lgcd = 0;
        int log_entity_size;
        unsigned long size;
        u64 start = 0;
        int err;
        struct page *pg;
        unsigned int nsg;
        int sglen;
        u64 pa;
        u64 paend;
        struct scatterlist *sg;
        struct device *dma = mvdev->vdev.dma_dev;

        for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
             map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
                size = maplen(map, mr);
                lgcd = gcd(lgcd, size);
        }
        log_entity_size = ilog2(lgcd);

        sglen = 1 << log_entity_size;
        nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size);

        err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL);
        if (err)
                return err;

        sg = mr->sg_head.sgl;
        for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
             map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
                paend = map->addr + maplen(map, mr);
                for (pa = map->addr; pa < paend; pa += sglen) {
                        pg = pfn_to_page(__phys_to_pfn(pa));
                        if (!sg) {
                                mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
                                               map->start, map->last + 1);
                                err = -ENOMEM;
                                goto err_map;
                        }
                        sg_set_page(sg, pg, sglen, 0);
                        sg = sg_next(sg);
                }
        }
        mr->log_size = log_entity_size;
        mr->nsg = nsg;
        mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
        if (!mr->nent) {
                err = -ENOMEM;
                goto err_map;
        }

        err = create_direct_mr(mvdev, mr);
        if (err)
                goto err_direct;

        return 0;

err_direct:
        dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
err_map:
        sg_free_table(&mr->sg_head);
        return err;
}

static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
        struct device *dma = mvdev->vdev.dma_dev;

        destroy_direct_mr(mvdev, mr);
        dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
        sg_free_table(&mr->sg_head);
}

static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
                            struct vhost_iotlb *iotlb)
{
        struct mlx5_vdpa_mr *mr = &mvdev->mr;
        struct mlx5_vdpa_direct_mr *dmr;
        struct mlx5_vdpa_direct_mr *n;
        LIST_HEAD(tmp);
        u64 st;
        u64 sz;
        int err;

        st = start;
        while (size) {
                sz = (u32)min_t(u64, MAX_KLM_SIZE, size);
                dmr = kzalloc(sizeof(*dmr), GFP_KERNEL);
                if (!dmr) {
                        err = -ENOMEM;
                        goto err_alloc;
                }

                dmr->start = st;
                dmr->end = st + sz;
                dmr->perm = perm;
                err = map_direct_mr(mvdev, dmr, iotlb);
                if (err) {
                        kfree(dmr);
                        goto err_alloc;
                }

                list_add_tail(&dmr->list, &tmp);
                size -= sz;
                mr->num_directs++;
                mr->num_klms++;
                st += sz;
        }
        list_splice_tail(&tmp, &mr->head);
        return 0;

err_alloc:
        list_for_each_entry_safe(dmr, n, &mr->head, list) {
                list_del_init(&dmr->list);
                unmap_direct_mr(mvdev, dmr);
                kfree(dmr);
        }

        return err;
}

/* The iotlb pointer contains a list of maps. Go over the maps, possibly
 * merging mergeable maps, and create direct memory keys that provide the
 * device access to memory. The direct mkeys are then referred to by the
 * indirect memory key that provides access to the entire address space given
 * by iotlb.
 */
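/* For example, two adjacent maps [0x0, 0x7fff] and [0x8000, 0xffff] with
 * the same perm bits are merged and served by a single chain of direct
 * mkeys, while a hole between ranges is covered by null-mkey KLM entries
 * (see fill_indir()).
 */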
static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
        struct mlx5_vdpa_mr *mr = &mvdev->mr;
        struct mlx5_vdpa_direct_mr *dmr;
        struct mlx5_vdpa_direct_mr *n;
        struct vhost_iotlb_map *map;
        u32 pperm = U16_MAX;
        u64 last = U64_MAX;
        u64 ps = U64_MAX;
        u64 pe = U64_MAX;
        u64 start = 0;
        int err = 0;
        int nnuls;

        INIT_LIST_HEAD(&mr->head);
        for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
             map = vhost_iotlb_itree_next(map, start, last)) {
                start = map->start;
                if (pe == map->start && pperm == map->perm) {
                        /* Extend the previous range over this contiguous map */
                        pe = map->last + 1;
                } else {
                        if (ps != U64_MAX) {
                                if (pe < map->start) {
                                        /* We have a hole in the map. Check how
                                         * many null keys are required to fill it.
                                         */
                                        nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe,
                                                                       LOG_MAX_KLM_SIZE);
                                        mr->num_klms += nnuls;
                                }
                                err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
                                if (err)
                                        goto err_chain;
                        }
                        ps = map->start;
                        pe = map->last + 1;
                        pperm = map->perm;
                }
        }
        err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
        if (err)
                goto err_chain;

        /* Create the memory key that defines the guest's address space. This
         * memory key refers to the direct keys that contain the MTT
         * translations.
         */
        err = create_indirect_key(mvdev, mr);
        if (err)
                goto err_chain;

        mr->user_mr = true;
        return 0;

err_chain:
        list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
                list_del_init(&dmr->list);
                unmap_direct_mr(mvdev, dmr);
                kfree(dmr);
        }
        return err;
}

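/* Without an iotlb there is nothing to translate: a pass-through mkey in
 * PA access mode with length64 set covers the whole 64-bit address space
 * and needs no translation entries.
 */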
static int create_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        void *mkc;
        u32 *in;
        int err;

        in = kzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

        MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
        MLX5_SET(mkc, mkc, length64, 1);
        MLX5_SET(mkc, mkc, lw, 1);
        MLX5_SET(mkc, mkc, lr, 1);
        MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);

        err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
        if (!err)
                mr->user_mr = false;

        kfree(in);
        return err;
}

static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
        mlx5_vdpa_destroy_mkey(mvdev, &mr->mkey);
}

static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
        struct mlx5_vdpa_mr *mr = &mvdev->mr;
        int err;

        if (mr->initialized)
                return 0;

        if (iotlb)
                err = create_user_mr(mvdev, iotlb);
        else
                err = create_dma_mr(mvdev, mr);

        if (err)
                return err;

        mr->initialized = true;
        return 0;
}

int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
        int err;

        mutex_lock(&mvdev->mr.mkey_mtx);
        err = _mlx5_vdpa_create_mr(mvdev, iotlb);
        mutex_unlock(&mvdev->mr.mkey_mtx);
        return err;
}

static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
        struct mlx5_vdpa_direct_mr *dmr;
        struct mlx5_vdpa_direct_mr *n;

        destroy_indirect_key(mvdev, mr);
        list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
                list_del_init(&dmr->list);
                unmap_direct_mr(mvdev, dmr);
                kfree(dmr);
        }
}

void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
{
        struct mlx5_vdpa_mr *mr = &mvdev->mr;

        mutex_lock(&mr->mkey_mtx);
        if (!mr->initialized)
                goto out;

        if (mr->user_mr)
                destroy_user_mr(mvdev, mr);
        else
                destroy_dma_mr(mvdev, mr);

        memset(mr, 0, sizeof(*mr));
        mr->initialized = false;
out:
        mutex_unlock(&mr->mkey_mtx);
}

static bool map_empty(struct vhost_iotlb *iotlb)
{
        return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
}

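/* Entry point for the vdpa set_map path. An empty iotlb tears the
 * current mkey down. If a key already exists, *change_map is set so the
 * caller can suspend the device and rebuild the mapping; otherwise the
 * key is created here under the mkey mutex.
 */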
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
                             bool *change_map)
{
        struct mlx5_vdpa_mr *mr = &mvdev->mr;
        int err = 0;

        *change_map = false;
        if (map_empty(iotlb)) {
                mlx5_vdpa_destroy_mr(mvdev);
                return 0;
        }
        mutex_lock(&mr->mkey_mtx);
        if (mr->initialized) {
                mlx5_vdpa_info(mvdev, "memory map update\n");
                *change_map = true;
        }
        if (!*change_map)
                err = _mlx5_vdpa_create_mr(mvdev, iotlb);
        mutex_unlock(&mr->mkey_mtx);

        return err;
}