/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"
struct mlx5_db_pgdir {
	struct list_head	list;
	unsigned long		*bitmap;
	__be32			*db_page;
	dma_addr_t		db_dma;
};
/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.
 */
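
/* The buffers are assembled from PAGE_SIZE DMA-coherent fragments; the
 * mlx5_fill_page_frag_array() helpers at the bottom of this file hand
 * the fragments' bus addresses to the device.
 */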
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
					   size_t size, dma_addr_t *dma_handle,
					   int node)
{
	struct device *device = mlx5_core_dma_dev(dev);
	struct mlx5_priv *priv = &dev->priv;
	int original_node;
	void *cpu_handle;

	/* Temporarily retarget the device to @node so dma_alloc_coherent()
	 * allocates close to it; alloc_mutex keeps concurrent allocators
	 * from observing the override.
	 */
	mutex_lock(&priv->alloc_mutex);
	original_node = dev_to_node(device);
	set_dev_node(device, node);
	cpu_handle = dma_alloc_coherent(device, size, dma_handle,
					GFP_KERNEL);
	set_dev_node(device, original_node);
	mutex_unlock(&priv->alloc_mutex);
	return cpu_handle;
}
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node)
{
	int i;

	buf->size = size;
	buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf->page_shift = PAGE_SHIFT;
	buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
			     GFP_KERNEL);
	if (!buf->frags)
		goto err_out;

	for (i = 0; i < buf->npages; i++) {
		struct mlx5_buf_list *frag = &buf->frags[i];
		int frag_sz = min_t(int, size, PAGE_SIZE);

		frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
							  &frag->map, node);
		if (!frag->buf)
			goto err_free_buf;
		/* Fragments must be naturally page aligned so the low
		 * address bits stay free for permission flags.
		 */
		if (frag->map & ((1 << buf->page_shift) - 1)) {
			dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz,
					  buf->frags[i].buf, buf->frags[i].map);
			mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
				       &frag->map, buf->page_shift);
			goto err_free_buf;
		}
		size -= frag_sz;
	}

	return 0;

err_free_buf:
	/* Unwind the fragments allocated so far. */
	while (i--)
		dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE, buf->frags[i].buf,
				  buf->frags[i].map);
	kfree(buf->frags);
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node);
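
/* Illustrative sketch only (not part of the upstream driver): one way a
 * caller might pair mlx5_frag_buf_alloc_node() with the PAS fill helpers
 * defined at the bottom of this file. "example_dev" and the function name
 * are hypothetical; an initialized mlx5_core_dev is assumed.
 */
static int __maybe_unused mlx5_frag_buf_example(struct mlx5_core_dev *example_dev)
{
	struct mlx5_frag_buf buf;
	__be64 pas[2];
	int err;

	/* Two PAGE_SIZE fragments, steered toward NUMA node 0. */
	err = mlx5_frag_buf_alloc_node(example_dev, 2 * PAGE_SIZE, &buf, 0);
	if (err)
		return err;

	/* Export the fragment bus addresses, e.g. into a command mailbox. */
	mlx5_fill_page_frag_array(&buf, pas);

	/* ... program the HCA with "pas" and use the buffer ... */

	mlx5_frag_buf_free(example_dev, &buf);
	return 0;
}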
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
	int size = buf->size;
	int i;

	for (i = 0; i < buf->npages; i++) {
		int frag_sz = min_t(int, size, PAGE_SIZE);

		dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz, buf->frags[i].buf,
				  buf->frags[i].map);
		size -= frag_sz;
	}
	kfree(buf->frags);
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_free);
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
						 int node)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();
	struct mlx5_db_pgdir *pgdir;

	pgdir = kzalloc_node(sizeof(*pgdir), GFP_KERNEL, node);
	if (!pgdir)
		return NULL;

	pgdir->bitmap = bitmap_zalloc_node(db_per_page, GFP_KERNEL, node);
	if (!pgdir->bitmap) {
		kfree(pgdir);
		return NULL;
	}

	/* All doorbell slots start out free. */
	bitmap_fill(pgdir->bitmap, db_per_page);

	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
						       &pgdir->db_dma, node);
	if (!pgdir->db_page) {
		bitmap_free(pgdir->bitmap);
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}
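
/* Sizing note: with 4 KiB pages and 64-byte cache lines this yields
 * PAGE_SIZE / cache_line_size() = 4096 / 64 = 64 doorbell records per
 * pgdir page, each in its own cache line; the real divisor is whatever
 * cache_line_size() reports at runtime.
 */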
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
				    struct mlx5_db *db)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();
	int offset;
	int i;

	i = find_first_bit(pgdir->bitmap, db_per_page);
	if (i >= db_per_page)
		return -ENOMEM;

	__clear_bit(i, pgdir->bitmap);

	db->u.pgdir = pgdir;
	db->index   = i;
	/* One doorbell record per cache line. */
	offset = db->index * cache_line_size();
	db->db  = pgdir->db_page + offset / sizeof(*pgdir->db_page);
	db->dma = pgdir->db_dma  + offset;

	db->db[0] = 0;
	db->db[1] = 0;

	return 0;
}
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
	struct mlx5_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&dev->priv.pgdir_mutex);

	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = mlx5_alloc_db_pgdir(dev, node);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->priv.pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
	mutex_unlock(&dev->priv.pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
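
/* Illustrative sketch only (not part of the upstream driver): a doorbell
 * record round trip. "example_dev" and the function name are hypothetical;
 * db.db is the CPU-visible pair of 32-bit doorbell words, db.dma the bus
 * address handed to the device in queue contexts.
 */
static int __maybe_unused mlx5_db_example(struct mlx5_core_dev *example_dev)
{
	struct mlx5_db db;
	int err;

	err = mlx5_db_alloc_node(example_dev, &db,
				 dev_to_node(mlx5_core_dma_dev(example_dev)));
	if (err)
		return err;

	/* ... pass db.dma to firmware; hardware then reads db.db updates ... */

	mlx5_db_free(example_dev, &db);
	return 0;
}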
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();

	mutex_lock(&dev->priv.pgdir_mutex);

	__set_bit(db->index, db->u.pgdir->bitmap);

	/* Once every slot is free again, release the whole pgdir page. */
	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
		dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		bitmap_free(db->u.pgdir->bitmap);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
{
	int i;

	/* Fragment addresses are page aligned, so only the two lowest
	 * bits may carry permission flags.
	 */
	WARN_ON(perm & 0xfc);
	for (i = 0; i < buf->npages; i++)
		pas[i] = cpu_to_be64(buf->frags[i].map | perm);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array_perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
	mlx5_fill_page_frag_array_perm(buf, pas, 0);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);