drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"

enum {
        MLX5_PAGES_CANT_GIVE    = 0,
        MLX5_PAGES_GIVE         = 1,
        MLX5_PAGES_TAKE         = 2
};

struct mlx5_pages_req {
        struct mlx5_core_dev *dev;
        u16     func_id;
        u8      ec_function;
        s32     npages;
        struct work_struct work;
        u8      release_all;
};

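/* One tracked host page. Each system page of PAGE_SIZE is handed to
 * firmware as MLX5_NUM_4K_IN_PAGE chunks of 4K; bitmask marks which
 * chunks are still free and free_count caches their number.
 */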
struct fw_page {
        struct rb_node          rb_node;
        u64                     addr;
        struct page            *page;
        u16                     func_id;
        unsigned long           bitmask;
        struct list_head        list;
        unsigned                free_count;
};

enum {
        MAX_RECLAIM_TIME_MSECS  = 5000,
        MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};

enum {
        MLX5_MAX_RECLAIM_TIME_MILI      = 5000,
        MLX5_NUM_4K_IN_PAGE             = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

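/* Return the rb-tree of pages tracked for @func_id, lazily allocating
 * it and registering it in the xarray on first use.
 */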
static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
{
        struct rb_root *root;
        int err;

        root = xa_load(&dev->priv.page_root_xa, func_id);
        if (root)
                return root;

        root = kzalloc(sizeof(*root), GFP_KERNEL);
        if (!root)
                return ERR_PTR(-ENOMEM);

        err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
        if (err) {
                kfree(root);
                return ERR_PTR(err);
        }

        *root = RB_ROOT;

        return root;
}

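/* Start tracking a newly mapped host page: insert it into the
 * per-function rb-tree, keyed by DMA address, with every 4K chunk free.
 */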
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
        struct rb_node *parent = NULL;
        struct rb_root *root;
        struct rb_node **new;
        struct fw_page *nfp;
        struct fw_page *tfp;
        int i;

        root = page_root_per_func_id(dev, func_id);
        if (IS_ERR(root))
                return PTR_ERR(root);

        new = &root->rb_node;

        while (*new) {
                parent = *new;
                tfp = rb_entry(parent, struct fw_page, rb_node);
                if (tfp->addr < addr)
                        new = &parent->rb_right;
                else if (tfp->addr > addr)
                        new = &parent->rb_left;
                else
                        return -EEXIST;
        }

        nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
        if (!nfp)
                return -ENOMEM;

        nfp->addr = addr;
        nfp->page = page;
        nfp->func_id = func_id;
        nfp->free_count = MLX5_NUM_4K_IN_PAGE;
        for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
                set_bit(i, &nfp->bitmask);

        rb_link_node(&nfp->rb_node, parent, new);
        rb_insert_color(&nfp->rb_node, root);
        list_add(&nfp->list, &dev->priv.free_list);

        return 0;
}

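/* Binary search of the per-function rb-tree for the page tracked at @addr. */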
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
                                    u32 func_id)
{
        struct fw_page *result = NULL;
        struct rb_root *root;
        struct rb_node *tmp;
        struct fw_page *tfp;

        root = xa_load(&dev->priv.page_root_xa, func_id);
        if (WARN_ON_ONCE(!root))
                return NULL;

        tmp = root->rb_node;

        while (tmp) {
                tfp = rb_entry(tmp, struct fw_page, rb_node);
                if (tfp->addr < addr) {
                        tmp = tmp->rb_right;
                } else if (tfp->addr > addr) {
                        tmp = tmp->rb_left;
                } else {
                        result = tfp;
                        break;
                }
        }

        return result;
}

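/* Ask firmware how many pages it needs for the boot or init stage, and
 * which function id they should be accounted to.
 */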
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
                                s32 *npages, int boot)
{
        u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
        u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
        int err;

        MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
        MLX5_SET(query_pages_in, in, op_mod, boot ?
                 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
                 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
        MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

        err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
        if (err)
                return err;

        *npages = MLX5_GET(query_pages_out, out, num_pages);
        *func_id = MLX5_GET(query_pages_out, out, function_id);

        return err;
}

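/* Hand out one free 4K chunk owned by @func_id: clear its bit in the
 * owning page and drop the page off the free list once fully used.
 */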
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
{
        struct fw_page *fp = NULL;
        struct fw_page *iter;
        unsigned n;

        list_for_each_entry(iter, &dev->priv.free_list, list) {
                if (iter->func_id != func_id)
                        continue;
                fp = iter;
        }

        if (list_empty(&dev->priv.free_list) || !fp)
                return -ENOMEM;

        n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
        if (n >= MLX5_NUM_4K_IN_PAGE) {
                mlx5_core_warn(dev, "alloc 4k bug\n");
                return -ENOENT;
        }
        clear_bit(n, &fp->bitmask);
        fp->free_count--;
        if (!fp->free_count)
                list_del(&fp->list);

        *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

        return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

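/* Stop tracking a host page: unlink it from the rb-tree (and the free
 * list, if present there), undo the DMA mapping and release the page.
 */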
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
                     bool in_free_list)
{
        struct rb_root *root;

        root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
        if (WARN_ON_ONCE(!root))
                return;

        rb_erase(&fwp->rb_node, root);
        if (in_free_list)
                list_del(&fwp->list);
        dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
                       PAGE_SIZE, DMA_BIDIRECTIONAL);
        __free_page(fwp->page);
        kfree(fwp);
}

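/* Return one 4K chunk. Its page rejoins the free list when the first
 * chunk comes back, and is released entirely once all chunks are free.
 */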
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
{
        struct fw_page *fwp;
        int n;

        fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
        if (!fwp) {
                mlx5_core_warn_rl(dev, "page not found\n");
                return;
        }
        n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
        fwp->free_count++;
        set_bit(n, &fwp->bitmask);
        if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
                free_fwp(dev, fwp, fwp->free_count != 1);
        else if (fwp->free_count == 1)
                list_add(&fwp->list, &dev->priv.free_list);
}

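/* Allocate a host page near the device's NUMA node, DMA-map it and start
 * tracking it. Firmware cannot use bus address 0, so such a mapping is
 * held aside and only unmapped once a usable address is obtained.
 */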
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
        struct device *device = mlx5_core_dma_dev(dev);
        int nid = dev_to_node(device);
        struct page *page;
        u64 zero_addr = 1;
        u64 addr;
        int err;

        page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
        if (!page) {
                mlx5_core_warn(dev, "failed to allocate page\n");
                return -ENOMEM;
        }
map:
        addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(device, addr)) {
                mlx5_core_warn(dev, "failed dma mapping page\n");
                err = -ENOMEM;
                goto err_mapping;
        }

        /* Firmware doesn't support page with physical address 0 */
        if (addr == 0) {
                zero_addr = addr;
                goto map;
        }

        err = insert_page(dev, addr, page, func_id);
        if (err) {
                mlx5_core_err(dev, "failed to track allocated page\n");
                dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
        }

err_mapping:
        if (err)
                __free_page(page);

        if (zero_addr == 0)
                dma_unmap_page(device, zero_addr, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);

        return err;
}

static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
                             bool ec_function)
{
        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
        int err;

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

        err = mlx5_cmd_exec_in(dev, manage_pages, in);
        if (err)
                mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
                               func_id, err);
}

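/* Satisfy a firmware request for @npages pages on behalf of @func_id:
 * collect 4K chunks (allocating fresh host pages as needed), post them
 * via MANAGE_PAGES(GIVE) and update the accounting counters. On failure
 * the chunks are returned and firmware is optionally notified.
 */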
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                      int notify_fail, bool ec_function)
{
        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
        int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
        u64 addr;
        int err;
        u32 *in;
        int i;

        inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
                goto out_free;
        }

        for (i = 0; i < npages; i++) {
retry:
                err = alloc_4k(dev, &addr, func_id);
                if (err) {
                        if (err == -ENOMEM)
                                err = alloc_system_page(dev, func_id);
                        if (err)
                                goto out_4k;

                        goto retry;
                }
                MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
        }

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, input_num_entries, npages);
        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
                               func_id, npages, err);
                goto out_4k;
        }

        dev->priv.fw_pages += npages;
        if (func_id)
                dev->priv.vfs_pages += npages;
        else if (mlx5_core_is_ecpf(dev) && !ec_function)
                dev->priv.peer_pf_pages += npages;

        mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
                      npages, ec_function, func_id, err);

        kvfree(in);
        return 0;

out_4k:
        for (i--; i >= 0; i--)
                free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
out_free:
        kvfree(in);
        if (notify_fail)
                page_notify_fail(dev, func_id, ec_function);
        return err;
}

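/* Free every page tracked for @func_id without a reclaim command; used
 * when firmware signals that all of a function's pages may be released.
 */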
static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
                              bool ec_function)
{
        struct rb_root *root;
        struct rb_node *p;
        int npages = 0;

        root = xa_load(&dev->priv.page_root_xa, func_id);
        if (WARN_ON_ONCE(!root))
                return;

        p = rb_first(root);
        while (p) {
                struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

                p = rb_next(p);
                npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
                free_fwp(dev, fwp, fwp->free_count);
        }

        dev->priv.fw_pages -= npages;
        if (func_id)
                dev->priv.vfs_pages -= npages;
        else if (mlx5_core_is_ecpf(dev) && !ec_function)
                dev->priv.peer_pf_pages -= npages;

        mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
                      npages, ec_function, func_id);
}

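/* Issue a MANAGE_PAGES(TAKE) command. If the command interface is down,
 * synthesize the reply instead: walk our own tree and report the first
 * input_num_entries tracked addresses as reclaimed.
 */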
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
                             u32 *in, int in_size, u32 *out, int out_size)
{
        struct rb_root *root;
        struct fw_page *fwp;
        struct rb_node *p;
        u32 func_id;
        u32 npages;
        u32 i = 0;

        if (!mlx5_cmd_is_down(dev))
                return mlx5_cmd_exec(dev, in, in_size, out, out_size);

        /* No hard feelings, we want our pages back! */
        npages = MLX5_GET(manage_pages_in, in, input_num_entries);
        func_id = MLX5_GET(manage_pages_in, in, function_id);

        root = xa_load(&dev->priv.page_root_xa, func_id);
        if (WARN_ON_ONCE(!root))
                return -EEXIST;

        p = rb_first(root);
        while (p && i < npages) {
                fwp = rb_entry(p, struct fw_page, rb_node);
                p = rb_next(p);

                MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
                i++;
        }

        MLX5_SET(manage_pages_out, out, output_num_entries, i);
        return 0;
}

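/* Reclaim up to @npages pages from firmware for @func_id, free the
 * returned 4K chunks and update the accounting counters.
 */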
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                         int *nclaimed, bool ec_function)
{
        int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
        int num_claimed;
        u32 *out;
        int err;
        int i;

        if (nclaimed)
                *nclaimed = 0;

        outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, input_num_entries, npages);
        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

        mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
                      func_id, npages, outlen);
        err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
        if (err) {
                mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
                goto out_free;
        }

        num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
        if (num_claimed > npages) {
                mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
                               num_claimed, npages);
                err = -EINVAL;
                goto out_free;
        }

        for (i = 0; i < num_claimed; i++)
                free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);

        if (nclaimed)
                *nclaimed = num_claimed;

        dev->priv.fw_pages -= num_claimed;
        if (func_id)
                dev->priv.vfs_pages -= num_claimed;
        else if (mlx5_core_is_ecpf(dev) && !ec_function)
                dev->priv.peer_pf_pages -= num_claimed;

out_free:
        kvfree(out);
        return err;
}

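/* Deferred work: route a queued page request to release_all_pages(),
 * reclaim_pages() or give_pages() based on its type and sign.
 */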
static void pages_work_handler(struct work_struct *work)
{
        struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
        struct mlx5_core_dev *dev = req->dev;
        int err = 0;

        if (req->release_all)
                release_all_pages(dev, req->func_id, req->ec_function);
        else if (req->npages < 0)
                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
                                    req->ec_function);
        else if (req->npages > 0)
                err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

        if (err)
                mlx5_core_warn(dev, "%s fail %d\n",
                               req->npages < 0 ? "reclaim" : "give", err);

        kfree(req);
}

enum {
        EC_FUNCTION_MASK = 0x8000,
        RELEASE_ALL_PAGES_MASK = 0x4000,
};

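/* Notifier for PAGE_REQUEST events. Runs in atomic context, so it only
 * decodes the EQE and queues the actual work on the page workqueue.
 */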
static int req_pages_handler(struct notifier_block *nb,
                             unsigned long type, void *data)
{
        struct mlx5_pages_req *req;
        struct mlx5_core_dev *dev;
        struct mlx5_priv *priv;
        struct mlx5_eqe *eqe;
        bool ec_function;
        bool release_all;
        u16 func_id;
        s32 npages;

        priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
        dev  = container_of(priv, struct mlx5_core_dev, priv);
        eqe  = data;

        func_id = be16_to_cpu(eqe->data.req_pages.func_id);
        npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
        ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
        release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
                      RELEASE_ALL_PAGES_MASK;
        mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
                      func_id, npages, release_all);
        req = kzalloc(sizeof(*req), GFP_ATOMIC);
        if (!req) {
                mlx5_core_warn(dev, "failed to allocate pages request\n");
                return NOTIFY_DONE;
        }

        req->dev = dev;
        req->func_id = func_id;
        req->npages = npages;
        req->ec_function = ec_function;
        req->release_all = release_all;
        INIT_WORK(&req->work, pages_work_handler);
        queue_work(dev->priv.pg_wq, &req->work);
        return NOTIFY_OK;
}

int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
        u16 func_id;
        s32 npages;
        int err;

        err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
        if (err)
                return err;

        mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
                      npages, boot ? "boot" : "init", func_id);

        return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
        MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

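/* Page addresses that fit in one reclaim reply: the command layout's
 * immediate output plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks,
 * minus the fixed manage_pages_out header, divided by the entry size.
 */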
static int optimal_reclaimed_pages(void)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_layout *lay;
        int ret;

        ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
               MLX5_ST_SZ_BYTES(manage_pages_out)) /
               MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

        return ret;
}

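/* Drain one per-function tree in optimally sized batches. The deadline
 * is extended whenever progress is made, so only a firmware that stops
 * returning pages makes us give up.
 */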
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
                                   struct rb_root *root, u16 func_id)
{
        unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);

        while (!RB_EMPTY_ROOT(root)) {
                int nclaimed;
                int err;

                err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
                                    &nclaimed, mlx5_core_is_ecpf(dev));
                if (err) {
                        mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
                                       err, func_id);
                        return err;
                }

                if (nclaimed)
                        end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);

                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
                        break;
                }
        }

        return 0;
}

int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
        struct rb_root *root;
        unsigned long id;
        void *entry;

        xa_for_each(&dev->priv.page_root_xa, id, entry) {
                root = entry;
                mlx5_reclaim_root_pages(dev, root, id);
                xa_erase(&dev->priv.page_root_xa, id);
                kfree(root);
        }

        WARN_ON(!xa_empty(&dev->priv.page_root_xa));

        WARN(dev->priv.fw_pages,
             "FW pages counter is %d after reclaiming all pages\n",
             dev->priv.fw_pages);
        WARN(dev->priv.vfs_pages,
             "VFs FW pages counter is %d after reclaiming all pages\n",
             dev->priv.vfs_pages);
        WARN(dev->priv.peer_pf_pages,
             "Peer PF FW pages counter is %d after reclaiming all pages\n",
             dev->priv.peer_pf_pages);

        return 0;
}

int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
        INIT_LIST_HEAD(&dev->priv.free_list);
        dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
        if (!dev->priv.pg_wq)
                return -ENOMEM;

        xa_init(&dev->priv.page_root_xa);

        return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
        xa_destroy(&dev->priv.page_root_xa);
        destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
        MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
        mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
        mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
        flush_workqueue(dev->priv.pg_wq);
}

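/* Busy-wait (with sleeps) for a page counter to drop to zero, extending
 * the deadline as long as the count keeps decreasing.
 */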
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
        unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
        int prev_pages = *pages;

        /* In case of internal error we will free the pages manually later */
        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                mlx5_core_warn(dev, "Skipping wait for vf pages stage");
                return 0;
        }

        mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
        while (*pages) {
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
                        return -ETIMEDOUT;
                }
                if (*pages < prev_pages) {
                        end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
                        prev_pages = *pages;
                }
                msleep(50);
        }

        mlx5_core_dbg(dev, "All pages received\n");
        return 0;
}