Merge tag 'mlx5-updates-2020-05-15' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-microblaze.git] / drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 91bd258..5ddd186 100644
@@ -35,7 +35,6 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
 #include "lib/eq.h"
 
@@ -51,6 +50,7 @@ struct mlx5_pages_req {
        u8      ec_function;
        s32     npages;
        struct work_struct work;
+       u8      release_all;
 };
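
With the new release_all flag, a queued request now carries everything
pages_work_handler() needs for all three actions. For reference, the request
struct after this change reads as follows (the dev and func_id members are not
visible in this hunk, but follow from their uses further down):

	struct mlx5_pages_req {
		struct mlx5_core_dev *dev;
		u16	func_id;
		u8	ec_function;
		s32	npages;
		struct work_struct work;
		u8	release_all;
	};
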
 
 struct fw_page {
@@ -136,8 +136,8 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
                                s32 *npages, int boot)
 {
-       u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
-       u32 in[MLX5_ST_SZ_DW(query_pages_in)]   = {0};
+       u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
        int err;
 
        MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
@@ -146,7 +146,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
                 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
        MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));
 
-       err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+       err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
        if (err)
                return err;
 
@@ -156,15 +156,21 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
        return err;
 }
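
Together with the "= {0}" to "= {}" initializer cleanup (both forms zero the
whole command buffer), this hunk switches to the command wrappers introduced
earlier in this series, which derive both buffer sizes from the ifc command
name. Abridged from include/linux/mlx5/driver.h, they expand approximately to:

	#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                   \
		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out,  \
			      MLX5_ST_SZ_BYTES(ifc_cmd##_out))

	#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                           \
	({                                                                   \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};                 \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);                 \
	})

This is also why page_notify_fail() below drops its local out[] array:
mlx5_cmd_exec_in() supplies a correctly sized, zeroed one of its own.
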
 
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
 {
-       struct fw_page *fp;
+       struct fw_page *fp = NULL;
+       struct fw_page *iter;
        unsigned n;
 
-       if (list_empty(&dev->priv.free_list))
+       list_for_each_entry(iter, &dev->priv.free_list, list) {
+               if (iter->func_id != func_id)
+                       continue;
+               fp = iter;
+       }
+
+       if (list_empty(&dev->priv.free_list) || !fp)
                return -ENOMEM;
 
-       fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
        n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
        if (n >= MLX5_NUM_4K_IN_PAGE) {
                mlx5_core_warn(dev, "alloc 4k bug\n");
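
alloc_4k() now hands out a 4K chunk only from a page already owned by the
requesting function, keeping firmware pages dedicated to the function they
were first given to. Note that the committed loop scans the whole free list
and keeps the last match, and that fp doubling as the found flag makes the
list_empty() test redundant. An equivalent first-match form (sketch only):

	list_for_each_entry(iter, &dev->priv.free_list, list) {
		if (iter->func_id == func_id) {
			fp = iter;
			break;	/* any page owned by func_id will do */
		}
	}
	if (!fp)	/* also covers an empty free list */
		return -ENOMEM;
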
@@ -182,6 +188,18 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
 
 #define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)
 
+static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
+                    bool in_free_list)
+{
+       rb_erase(&fwp->rb_node, &dev->priv.page_root);
+       if (in_free_list)
+               list_del(&fwp->list);
+       dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
+                      PAGE_SIZE, DMA_BIDIRECTIONAL);
+       __free_page(fwp->page);
+       kfree(fwp);
+}
+
 static void free_4k(struct mlx5_core_dev *dev, u64 addr)
 {
        struct fw_page *fwp;
@@ -189,24 +207,16 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
 
        fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
        if (!fwp) {
-               mlx5_core_warn(dev, "page not found\n");
+               mlx5_core_warn_rl(dev, "page not found\n");
                return;
        }
-
        n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
        fwp->free_count++;
        set_bit(n, &fwp->bitmask);
-       if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
-               rb_erase(&fwp->rb_node, &dev->priv.page_root);
-               if (fwp->free_count != 1)
-                       list_del(&fwp->list);
-               dma_unmap_page(dev->device, addr & MLX5_U64_4K_PAGE_MASK,
-                              PAGE_SIZE, DMA_BIDIRECTIONAL);
-               __free_page(fwp->page);
-               kfree(fwp);
-       } else if (fwp->free_count == 1) {
+       if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
+               free_fwp(dev, fwp, fwp->free_count != 1);
+       else if (fwp->free_count == 1)
                list_add(&fwp->list, &dev->priv.free_list);
-       }
 }
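
The unmap-and-free tail of free_4k() is factored out into free_fwp() above so
that release_all_pages() below can share it; the in_free_list argument tells
free_fwp() whether the page must first be unlinked from priv.free_list:

	/* free_4k() passes "fwp->free_count != 1": free_count was just bumped
	 * to MLX5_NUM_4K_IN_PAGE, so any value other than 1 means earlier
	 * chunks of this page were already free and the page is linked on
	 * priv.free_list. Only when MLX5_NUM_4K_IN_PAGE == 1 does a page get
	 * freed without ever having been listed.
	 */
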
 
 static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
@@ -257,8 +267,7 @@ err_mapping:
 static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
                             bool ec_function)
 {
-       u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
-       u32 in[MLX5_ST_SZ_DW(manage_pages_in)]   = {0};
+       u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
        int err;
 
        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
@@ -266,7 +275,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
 
-       err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+       err = mlx5_cmd_exec_in(dev, manage_pages, in);
        if (err)
                mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
                               func_id, err);
@@ -292,7 +301,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 
        for (i = 0; i < npages; i++) {
 retry:
-               err = alloc_4k(dev, &addr);
+               err = alloc_4k(dev, &addr, func_id);
                if (err) {
                        if (err == -ENOMEM)
                                err = alloc_system_page(dev, func_id);
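
The retry path still holds with the stricter alloc_4k(): on -ENOMEM a fresh
system page is allocated and, presumably via the func_id argument that
alloc_system_page() already takes, tagged as owned by the requesting function,
so the retried alloc_4k(dev, &addr, func_id) can find it:

	/* alloc_4k(dev, &addr, func_id)   -> -ENOMEM, no page owned by
	 *                                    func_id on the free list
	 * alloc_system_page(dev, func_id) -> new page tagged for func_id
	 * goto retry                      -> alloc_4k() now succeeds
	 */
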
@@ -339,6 +348,33 @@ out_free:
        return err;
 }
 
+static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+                             bool ec_function)
+{
+       struct rb_node *p;
+       int npages = 0;
+
+       p = rb_first(&dev->priv.page_root);
+       while (p) {
+               struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);
+
+               p = rb_next(p);
+               if (fwp->func_id != func_id)
+                       continue;
+               npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
+               free_fwp(dev, fwp, fwp->free_count);
+       }
+
+       dev->priv.fw_pages -= npages;
+       if (func_id)
+               dev->priv.vfs_pages -= npages;
+       else if (mlx5_core_is_ecpf(dev) && !ec_function)
+               dev->priv.peer_pf_pages -= npages;
+
+       mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
+                     npages, ec_function, func_id);
+}
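
release_all_pages() frees every fw_page owned by func_id and fixes up the
per-function accounting afterwards. Two details: the iterator is advanced with
rb_next() before free_fwp() erases the current node, and the nonzero
free_count is reused as the in_free_list boolean, since a fw_page sits on
priv.free_list exactly while it has free 4K chunks. The erase-while-walking
pattern in isolation (a generic sketch, not driver code):

	#include <linux/rbtree.h>

	static void erase_all(struct rb_root *root)
	{
		struct rb_node *p = rb_first(root);

		while (p) {
			struct rb_node *cur = p;

			p = rb_next(p);	     /* advance first ...           */
			rb_erase(cur, root); /* ... then erase the old node */
		}
	}
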
+
 static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
                             u32 *in, int in_size, u32 *out, int out_size)
 {
@@ -374,7 +410,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                         int *nclaimed, bool ec_function)
 {
        int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
-       u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+       u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
        int num_claimed;
        u32 *out;
        int err;
@@ -432,7 +468,9 @@ static void pages_work_handler(struct work_struct *work)
        struct mlx5_core_dev *dev = req->dev;
        int err = 0;
 
-       if (req->npages < 0)
+       if (req->release_all)
+               release_all_pages(dev, req->func_id, req->ec_function);
+       else if (req->npages < 0)
                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
                                    req->ec_function);
        else if (req->npages > 0)
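
The handler now dispatches three ways, with the release-all action taking
precedence over the page count:

	/* req->release_all -> release_all_pages()
	 * req->npages < 0  -> reclaim_pages(), FW is returning pages
	 * req->npages > 0  -> give_pages(),    FW is requesting pages
	 */
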
@@ -447,6 +485,7 @@ static void pages_work_handler(struct work_struct *work)
 
 enum {
        EC_FUNCTION_MASK = 0x8000,
+       RELEASE_ALL_PAGES_MASK = 0x4000,
 };
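
Both flags travel in the same 16-bit ec_function field of the page-request
EQE, decoded in req_pages_handler() below:

	/* eqe->data.req_pages.ec_function, after be16_to_cpu():
	 *
	 *   bit 15  EC_FUNCTION_MASK        request is for the embedded
	 *                                   CPU function
	 *   bit 14  RELEASE_ALL_PAGES_MASK  FW asks the driver to release
	 *                                   all pages held for func_id
	 */
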
 
 static int req_pages_handler(struct notifier_block *nb,
@@ -457,6 +496,7 @@ static int req_pages_handler(struct notifier_block *nb,
        struct mlx5_priv *priv;
        struct mlx5_eqe *eqe;
        bool ec_function;
+       bool release_all;
        u16 func_id;
        s32 npages;
 
@@ -467,8 +507,10 @@ static int req_pages_handler(struct notifier_block *nb,
        func_id = be16_to_cpu(eqe->data.req_pages.func_id);
        npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
        ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
-       mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
-                     func_id, npages);
+       release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
+                     RELEASE_ALL_PAGES_MASK;
+       mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
+                     func_id, npages, release_all);
        req = kzalloc(sizeof(*req), GFP_ATOMIC);
        if (!req) {
                mlx5_core_warn(dev, "failed to allocate pages request\n");
@@ -479,6 +521,7 @@ static int req_pages_handler(struct notifier_block *nb,
        req->func_id = func_id;
        req->npages = npages;
        req->ec_function = ec_function;
+       req->release_all = release_all;
        INIT_WORK(&req->work, pages_work_handler);
        queue_work(dev->priv.pg_wq, &req->work);
        return NOTIFY_OK;
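
Taken together, the event path for the new firmware request looks like this:

	/* FW page-request EQE with RELEASE_ALL_PAGES_MASK set
	 *   -> req_pages_handler()           atomic notifier context,
	 *                                    hence kzalloc(GFP_ATOMIC)
	 *     -> queue_work(priv.pg_wq, ..)  defer to process context
	 *       -> pages_work_handler()
	 *         -> release_all_pages(dev, req->func_id, req->ec_function)
	 */
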