/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/eq.h"
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	u8	ec_function;
	s32	npages;
	struct work_struct work;
	u8	release_all;
};
struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u16			func_id;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned int		free_count;
};
enum {
	MAX_RECLAIM_TIME_MSECS		= 5000,
	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
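/*
 * Each host page handed to firmware is tracked by a struct fw_page in an
 * rb-tree keyed by DMA address. A host page of PAGE_SIZE is carved into
 * MLX5_NUM_4K_IN_PAGE firmware-sized (4K) chunks; 'bitmask' marks which
 * chunks are still free and 'free_count' caches how many are left.
 */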
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct fw_page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}
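/*
 * Query how many pages firmware currently wants, and for which function,
 * at the boot or init stage.
 */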
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}
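/*
 * Hand out one free 4K chunk, taken from a partially used host page on
 * the free list; the caller falls back to allocating a fresh system page
 * when the list is empty.
 */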
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
	struct fw_page *fp;
	unsigned int n;

	if (list_empty(&dev->priv.free_list))
		return -ENOMEM;

	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}
#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)
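/*
 * free_fwp() unlinks and releases a fully reclaimed host page; free_addr()
 * returns a single 4K chunk, whose bit index is its 4K offset within the
 * host page, and releases the page once all of its chunks are free again.
 */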
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
		     bool in_free_list)
{
	rb_erase(&fwp->rb_node, &dev->priv.page_root);
	if (in_free_list)
		list_del(&fwp->list);
	dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(fwp->page);
	kfree(fwp);
}

static void free_addr(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn_rl(dev, "page not found\n");
		return;
	}

	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
		/* not on the free list if it went straight from full to free */
		free_fwp(dev, fwp, fwp->free_count != 1);
	else if (fwp->free_count == 1)
		list_add(&fwp->list, &dev->priv.free_list);
}
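/*
 * Allocate a host page near the device's NUMA node, DMA-map it and start
 * tracking it for 'func_id'. Firmware cannot use a page at bus address 0,
 * so such a mapping is set aside and the page is remapped; the zero
 * mapping is released at the end.
 */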
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct device *device = dev->device;
	int nid = dev_to_node(device);
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

	/* Firmware doesn't support page with physical address 0 */
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(device, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}
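/* Tell firmware that the driver cannot supply the pages it asked for. */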
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec_in(dev, manage_pages, in);
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}
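/*
 * Give 'npages' 4K chunks to firmware on behalf of 'func_id' in a single
 * MANAGE_PAGES(GIVE) command. On failure all chunks are returned to the
 * pool and, if 'notify_fail' is set, firmware is told no pages can be
 * given.
 */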
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail, bool ec_function)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "kvzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_4k;

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_4k;
	}

	dev->priv.fw_pages += npages;
	if (func_id)
		dev->priv.vfs_pages += npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages += npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
		      npages, ec_function, func_id, err);

	kvfree(in);
	return 0;

out_4k:
	for (i--; i >= 0; i--)
		free_addr(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id, ec_function);
	return err;
}
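/*
 * Drop every page owned by 'func_id' without issuing a firmware command;
 * used when firmware signals through RELEASE_ALL_PAGES that it no longer
 * references them. A page already on the free list must also be unlinked
 * from it, hence the free_count test passed to free_fwp().
 */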
static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
			      bool ec_function)
{
	struct rb_node *p;
	int npages = 0;

	p = rb_first(&dev->priv.page_root);
	while (p) {
		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

		p = rb_next(p);
		if (fwp->func_id != func_id)
			continue;
		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
		free_fwp(dev, fwp, fwp->free_count);
	}

	dev->priv.fw_pages -= npages;
	if (func_id)
		dev->priv.vfs_pages -= npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages -= npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
		      npages, ec_function, func_id);
}
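/*
 * Run a MANAGE_PAGES(TAKE) command. When the device is in internal error
 * state the command cannot reach firmware, so fake a successful reply
 * listing up to 'input_num_entries' of the function's own tracked pages.
 */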
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct fw_page *fwp;
	struct rb_node *p;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);

	p = rb_first(&dev->priv.page_root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);
		if (fwp->func_id != func_id)
			continue;

		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
		i++;
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}
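/*
 * Ask firmware to hand back up to 'npages' pages for 'func_id', free what
 * it actually returned and update the bookkeeping counters.
 */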
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed, bool ec_function)
{
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_addr(dev, MLX5_GET64(manage_pages_out, out, pas[i]));

	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	if (func_id)
		dev->priv.vfs_pages -= num_claimed;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}
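/*
 * Deferred handler for page request events: release_all bypasses the
 * command interface, npages < 0 reclaims pages from firmware and
 * npages > 0 gives pages to it.
 */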
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->release_all)
		release_all_pages(dev, req->func_id, req->ec_function);
	else if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
				    req->ec_function);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}
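/*
 * The 16-bit ec_function field of the page request EQE carries flag bits
 * on top of the function number: bit 15 marks an embedded CPU function
 * and bit 14 requests a release of all pages.
 */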
enum {
	EC_FUNCTION_MASK = 0x8000,
	RELEASE_ALL_PAGES_MASK = 0x4000,
};
static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	bool ec_function;
	bool release_all;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev  = container_of(priv, struct mlx5_core_dev, priv);
	eqe  = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
		      RELEASE_ALL_PAGES_MASK;
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
		      func_id, npages, release_all);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	req->ec_function = ec_function;
	req->release_all = release_all;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);

	return NOTIFY_OK;
}
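/*
 * Query the boot/init page requirement and satisfy it synchronously,
 * without notifying firmware on failure (the caller handles the error).
 */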
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}
enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};
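/*
 * Upper bound on page addresses that fit in one reclaim reply: the
 * immediate output area plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox block
 * payloads, minus the fixed reply header, divided by the size of one
 * address entry.
 */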
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}
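/*
 * Reclaim all outstanding pages on teardown, extending the deadline as
 * long as firmware keeps making progress, and warn if any page counter
 * is still non-zero at the end.
 */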
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err = 0;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(),
					    &nclaimed, mlx5_core_is_ecpf(dev));
			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}
			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.vfs_pages,
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.vfs_pages);
	WARN(dev->priv.peer_pf_pages,
	     "Peer PF FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.peer_pf_pages);

	return 0;
}
int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}
void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}
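/*
 * Wait for a pending-pages counter (e.g. VF pages) to reach zero,
 * restarting the timeout whenever the count drops.
 */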
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
	int prev_pages = *pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage\n");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
	while (*pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
			return -ETIMEDOUT;
		}
		if (*pages < prev_pages) {
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
			prev_pages = *pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received\n");
	return 0;
}