/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"

enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	u8	ec_function;
	s32	npages;
	struct work_struct work;
	u8	release_all;
};

struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u32			function;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned int		free_count;
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

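/* Pack (func_id, ec_function) into the single u32 key used for the
 * per-function xarray and rb-tree lookups.
 */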
static u32 get_function(u16 func_id, bool ec_function)
{
	return (u32)func_id | (ec_function << 16);
}

static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
{
	if (!func_id)
		return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;

	if (func_id <= max(mlx5_core_max_vfs(dev), mlx5_core_max_ec_vfs(dev))) {
		if (ec_function)
			return MLX5_EC_VF;
		else
			return MLX5_VF;
	}

	return MLX5_SF;
}

static u32 mlx5_get_ec_function(u32 function)
{
	return function >> 16;
}

static u32 mlx5_get_func_id(u32 function)
{
	return function & 0xffff;
}

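/* Look up the rb-tree root for @function, lazily allocating a fresh
 * empty one and inserting it into the xarray on first use.
 */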
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
	struct rb_root *root;
	int err;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (root)
		return root;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	*root = RB_ROOT;

	return root;
}

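/* Track a freshly DMA-mapped host page in the per-function rb-tree,
 * keyed by DMA address, with all of its 4K chunks marked free.
 */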
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
	struct rb_node *parent = NULL;
	struct rb_root *root;
	struct rb_node **new;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	root = page_root_per_function(dev, function);
	if (IS_ERR(root))
		return PTR_ERR(root);

	new = &root->rb_node;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->function = function;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

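/* Find the tracked host page covering @addr in @function's tree. */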
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
				    u32 function)
{
	struct fw_page *result = NULL;
	struct rb_root *root;
	struct rb_node *tmp;
	struct fw_page *tfp;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return NULL;

	tmp = root->rb_node;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return 0;
}

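/* Hand out one free 4K chunk belonging to @function from a host page on
 * the free list; returns -ENOMEM when no such page exists so the caller
 * can allocate a fresh system page.
 */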
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
	struct fw_page *fp = NULL;
	struct fw_page *iter;
	unsigned n;

	list_for_each_entry(iter, &dev->priv.free_list, list) {
		if (iter->function != function)
			continue;
		fp = iter;
	}

	if (list_empty(&dev->priv.free_list) || !fp)
		return -ENOMEM;

	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
			       fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE);
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

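/* Untrack @fwp, then unmap and free its host page. @in_free_list tells
 * whether it must also be unlinked from the free list.
 */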
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
		     bool in_free_list)
{
	struct rb_root *root;

	root = xa_load(&dev->priv.page_root_xa, fwp->function);
	if (WARN_ON_ONCE(!root))
		return;

	rb_erase(&fwp->rb_node, root);
	if (in_free_list)
		list_del(&fwp->list);
	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(fwp->page);
	kfree(fwp);
}

static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
	if (!fwp) {
		mlx5_core_warn_rl(dev, "page not found\n");
		return;
	}
	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
		free_fwp(dev, fwp, fwp->free_count != 1);
	else if (fwp->free_count == 1)
		list_add(&fwp->list, &dev->priv.free_list);
}

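/* Allocate a host page near the device's NUMA node, DMA-map it and
 * start tracking it. Firmware cannot use bus address 0, so such a
 * mapping is held aside and the page is mapped again.
 */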
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
	struct device *device = mlx5_core_dma_dev(dev);
	int nid = dev_to_node(device);
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

	/* Firmware doesn't support page with physical address 0 */
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, function);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(device, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}

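/* Tell firmware we could not satisfy its page request so it does not
 * wait for pages that will never arrive.
 */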
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec_in(dev, manage_pages, in);
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

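/* Allocate @npages 4K chunks and hand them to firmware with a
 * MANAGE_PAGES(GIVE) command. On failure every chunk is returned to the
 * pool and, for event-triggered requests, firmware is notified via
 * MLX5_PAGES_CANT_GIVE.
 */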
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int event, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	int notify_fail = event;
	u16 func_type;
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr, function);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, function);
			if (err) {
				dev->priv.fw_pages_alloc_failed += (npages - i);
				goto out_4k;
			}

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out));
	if (err == -EREMOTEIO) {
		notify_fail = 0;
		/* if triggered by FW and failed by FW ignore */
		if (event) {
			err = 0;
			goto out_dropped;
		}
	}
	err = mlx5_cmd_check(dev, err, in, out);
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_dropped;
	}

	func_type = func_id_to_type(dev, func_id, ec_function);
	dev->priv.page_counters[func_type] += npages;
	dev->priv.fw_pages += npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
		      npages, ec_function, func_id, err);

	kvfree(in);
	return 0;

out_dropped:
	dev->priv.give_pages_dropped += npages;
out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id, ec_function);
	return err;
}

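/* Drop every page tracked for @func_id without returning them through
 * firmware; used when the page-request event carries the
 * RELEASE_ALL_PAGES flag.
 */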
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
			      bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	struct rb_root *root;
	struct rb_node *p;
	int npages = 0;
	u16 func_type;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return;

	p = rb_first(root);
	while (p) {
		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

		p = rb_next(p);
		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
		free_fwp(dev, fwp, fwp->free_count);
	}

	func_type = func_id_to_type(dev, func_id, ec_function);
	dev->priv.page_counters[func_type] -= npages;
	dev->priv.fw_pages -= npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
		      npages, ec_function, func_id);
}

static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
				     u32 npages)
{
	u32 pages_set = 0;
	unsigned int n;

	for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
		MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
				 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
		pages_set++;

		/* Don't overflow num_entries */
		if (pages_set == npages)
			break;
	}

	return pages_set;
}

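/* Execute the reclaim command, or, if the command interface is down,
 * synthesize the firmware's answer locally from the rb-tree so the
 * pages can still be freed.
 */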
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct rb_root *root;
	struct fw_page *fwp;
	struct rb_node *p;
	bool ec_function;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (!mlx5_cmd_is_down(dev))
		return mlx5_cmd_do(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);
	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
	if (WARN_ON_ONCE(!root))
		return -EEXIST;

	p = rb_first(root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);

		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

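/* Ask firmware to return up to @npages 4K chunks for @func_id and free
 * every address it reports back. @nclaimed, when non-NULL, receives the
 * number of chunks actually reclaimed.
 */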
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
			 int *nclaimed, bool event, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int num_claimed;
	u16 func_type;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
		      func_id, npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		npages = MLX5_GET(manage_pages_in, in, input_num_entries);
		dev->priv.reclaim_pages_discard += npages;
	}
	/* if triggered by FW event and failed by FW then ignore */
	if (event && err == -EREMOTEIO) {
		err = 0;
		goto out_free;
	}

	err = mlx5_cmd_check(dev, err, in, out);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

	if (nclaimed)
		*nclaimed = num_claimed;

	func_type = func_id_to_type(dev, func_id, ec_function);
	dev->priv.page_counters[func_type] -= num_claimed;
	dev->priv.fw_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

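/* Work item servicing one firmware page request: release everything,
 * reclaim pages (negative count) or give pages (positive count).
 */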
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->release_all)
		release_all_pages(dev, req->func_id, req->ec_function);
	else if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
				    true, req->ec_function);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

enum {
	EC_FUNCTION_MASK = 0x8000,
	RELEASE_ALL_PAGES_MASK = 0x4000,
};

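/* PAGE_REQUEST EQE handler. Runs in atomic context, so the actual work
 * is deferred to the page allocator workqueue.
 */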
static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	bool ec_function;
	bool release_all;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev  = container_of(priv, struct mlx5_core_dev, priv);
	eqe  = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
		      RELEASE_ALL_PAGES_MASK;
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
		      func_id, npages, release_all);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	req->ec_function = ec_function;
	req->release_all = release_all;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);

	return NOTIFY_OK;
}

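/* Query how many boot/init pages firmware needs and provide them. */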
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 func_id;
	s32 npages;
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

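/* Number of page addresses that fit in one reclaim command's output:
 * the inline output area plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox
 * blocks, minus the fixed manage_pages_out header.
 */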
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

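/* Reclaim every page tracked in @root in command-sized batches,
 * resetting the timeout whenever firmware makes progress.
 */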
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
				   struct rb_root *root, u32 function)
{
	u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
	unsigned long end = jiffies + recl_pages_to_jiffies;

	while (!RB_EMPTY_ROOT(root)) {
		u32 ec_function = mlx5_get_ec_function(function);
		u32 function_id = mlx5_get_func_id(function);
		int nclaimed;
		int err;

		err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(),
				    &nclaimed, false, ec_function);
		if (err) {
			mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n",
				       err, function_id, ec_function);
			return err;
		}

		if (nclaimed)
			end = jiffies + recl_pages_to_jiffies;

		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	}

	return 0;
}

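/* Return all pages to the host on teardown and sanity-check the
 * per-function page counters.
 */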
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	struct rb_root *root;
	unsigned long id;
	void *entry;

	xa_for_each(&dev->priv.page_root_xa, id, entry) {
		root = entry;
		mlx5_reclaim_root_pages(dev, root, id);
		xa_erase(&dev->priv.page_root_xa, id);
		kfree(root);
	}

	WARN_ON(!xa_empty(&dev->priv.page_root_xa));

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.page_counters[MLX5_VF],
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.page_counters[MLX5_VF]);
	WARN(dev->priv.page_counters[MLX5_HOST_PF],
	     "External host PF FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.page_counters[MLX5_HOST_PF]);
	WARN(dev->priv.page_counters[MLX5_EC_VF],
	     "EC VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.page_counters[MLX5_EC_VF]);

	return 0;
}

int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	xa_init(&dev->priv.page_root_xa);
	mlx5_pages_debugfs_init(dev);

	return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_pages_debugfs_cleanup(dev);
	xa_destroy(&dev->priv.page_root_xa);
	destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}

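/* Poll a page counter until it drops to zero, extending the deadline
 * whenever progress is observed.
 */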
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
	u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
	unsigned long end = jiffies + recl_vf_pages_to_jiffies;
	int prev_pages = *pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
	while (*pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
			return -ETIMEDOUT;
		}
		if (*pages < prev_pages) {
			end = jiffies + recl_vf_pages_to_jiffies;
			prev_pages = *pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received\n");
	return 0;
}