/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
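
/*
 * The resource tracker ties every HW resource (QP, CQ, SRQ, MPT, MTT,
 * EQ, counter, XRC domain, flow-steering rule, MAC) to the slave that
 * allocated it: one red-black tree per resource type, keyed by the
 * resource id, plus a per-slave list used for bulk cleanup when a
 * slave goes away.
 */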
struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1,
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
};
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put the new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
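
/*
 * Typical (hypothetical) usage of the two helpers above -- both assume
 * the caller already holds the tracker lock, mlx4_tlock(dev):
 *
 *	spin_lock_irq(mlx4_tlock(dev));
 *	if (!res_tracker_lookup(root, id))
 *		err = res_tracker_insert(root, res);
 *	spin_unlock_irq(mlx4_tlock(dev));
 */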
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY)
			for (i = 0 ; i < dev->num_slaves; i++)
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
			qp_ctx->pri_path.mgid_index = slave & 0x7F;
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
			qp_ctx->alt_path.mgid_index = slave & 0x7F;
	}
}
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
		if (MLX4_QP_ST_RC == qp_type)
			return -EINVAL;

		/* force strip vlan by clear vsd */
		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
		if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
	return 0;
}
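
/*
 * Note on update_vport_qp_param() above: when the PF has configured a
 * VST vlan for this VF (default_vlan != MLX4_VGT), the QP context the
 * VF submitted is rewritten on the fly: the operational vlan index and
 * default QoS are forced in, and with spoof checking enabled the
 * source MAC is pinned to the VF's registered MAC index.
 */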
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
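
/*
 * get_res()/put_res() bracket every command wrapper below: get_res()
 * parks the resource in RES_ANY_BUSY (rejecting concurrent users with
 * -EBUSY) and remembers the previous state in from_state; put_res()
 * restores it.  A sketch of the pattern (hypothetical caller):
 *
 *	err = get_res(dev, slave, id, RES_CQ, &cq);
 *	if (err)
 *		return err;
 *	... operate on cq while its state cannot change ...
 *	put_res(dev, slave, id, RES_CQ);
 */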
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* i indexes res_arr, so unwind to 0, not to the resource base */
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del_init(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
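
/*
 * add_res_range() allocates all tracker entries with GFP_KERNEL first,
 * then publishes them atomically under the tracker spinlock; on any
 * collision the already-inserted entries are unwound before unlocking,
 * so a range becomes either fully visible or not visible at all.
 */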
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
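
/*
 * rem_res_range() makes two passes under a single lock hold: the first
 * validates every entry (owner and removable state), the second
 * actually erases, so a half-removed range is impossible.
 */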
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
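
/*
 * The *_res_start_move_to() helpers implement a small per-resource
 * state machine.  For QPs the legal flow is
 *
 *	RES_QP_RESERVED <-> RES_QP_MAPPED <-> RES_QP_HW
 *
 * with RES_QP_BUSY held transiently while a command wrapper is in
 * flight; res_end_move() then commits to_state, and res_abort_move()
 * rolls back to from_state.
 */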
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
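
/*
 * QP allocation is a two step flow: RES_OP_RESERVE grabs a range of
 * QP numbers, and RES_OP_MAP_ICM then maps ICM memory for one QP and
 * moves it RESERVED -> MAPPED.  Most other resource types use the
 * combined RES_OP_RESERVE_AND_MAP operation instead.
 */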
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -ENOMEM;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mpt_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_counter_alloc(dev, &index);
	if (err)
		return err;

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		__mlx4_counter_free(dev, index);
	else
		set_param_l(out_param, index);

	return err;
}
static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
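
/*
 * In the ALLOC/FREE wrappers, vhcr->in_modifier selects the resource
 * type and vhcr->op_modifier the operation; each leaf helper
 * re-checks the op it supports, so an unknown combination falls
 * through to -EINVAL rather than touching the tracker.
 */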
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);

	return err;
}
static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
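
/*
 * Worked example (hypothetical values): log_sq_size = 6 and
 * log_sq_stride = 3 give sq_size = 1 << (6 + 3 + 4) = 8192 bytes;
 * with no receive queue (srq/rss/xrc set) and page_offset = 0 on
 * 4KB pages (page_shift = 12) this yields 8192 >> 12 = 2 MTT pages.
 */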
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
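
/*
 * check_mtt_range() is a pure containment check: an MTT resource of
 * order n owns [res_id, res_id + 2^n).  For example, a segment at
 * 1024 with order 4 covers entries 1024..1039, so start = 1028 with
 * size = 8 passes while start = 1036 with size = 8 fails.
 */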
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}
static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
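
/*
 * EQ and CQ entries are 32 bytes, hence the "+ 5" in the size math
 * above: a CQ of 2^16 entries on 4KB pages, for example, needs
 * 1 << (16 + 5 - 12) = 512 MTT entries, and anything smaller than a
 * page is clamped to a single entry.
 */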
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;

	qp_ctx = inbox->buf + 8;
	qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar = be32_to_cpu(*(__be32 *) inbox->buf);

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				/* slaves have only gid index 0 */
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
					if (qp_ctx->pri_path.mgid_index)
						return -EINVAL;
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
					if (qp_ctx->alt_path.mgid_index)
						return -EINVAL;
			}
			break;
		default:
			break;
		}

		break;
	default:
		break;
	}

	return 0;
}
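
/*
 * Slaves see only their own, paravirtualized GID at index 0 (see
 * update_gid() above), so verify_qp_parameters() rejects any attempt
 * by a non-master function to select a non-zero mgid_index in either
 * address path.
 */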
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	err = update_vport_qp_param(dev, inbox, slave);
	if (err)
		return err;

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		res->reg_id = reg_id;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		*reg_id = res->reg_id;
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
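
/*
 * Each QP keeps its attached multicast gids on rqp->mcg_list so that
 * the per-slave cleanup path (mlx4_delete_all_resources_for_slave())
 * can detach whatever a crashed or removed slave left behind.
 */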
static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
		     int block_loopback, enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
						 block_loopback, prot,
						 reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);
	default:
		return -EINVAL;
	}
}

static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
		     enum mlx4_protocol prot, enum mlx4_steer_type type,
		     u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
	default:
		return -EINVAL;
	}
}
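
/*
 * The ATTACH/DETACH mailbox carries the GID in inbox->buf; the VHCR
 * encodes the QP number in the low 24 bits of in_modifier, the protocol
 * in bits 28-30, the block-loopback flag in bit 31, and attach-vs-detach
 * in op_modifier. Bit 1 of gid[7] selects the steering type.
 */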
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err, qpn;
	struct res_qp *rqp;
	u64 reg_id = 0;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = qp_attach(dev, &qp, gid, block_loopback, prot,
				type, &reg_id);
		if (err) {
			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
			goto ex_put;
		}
		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
		if (err)
			goto ex_detach;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
		if (err)
			goto ex_put;

		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		if (err)
			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
			       qpn, reg_id);
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_detach:
	qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

/*
 * MAC validation for Flow Steering rules.
 * A VF can attach rules only with a MAC address that is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't a multicast or broadcast mac */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}

/*
 * In case of a missing eth header, prepend an eth header with a MAC
 * address assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = ctrl->port;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for the eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}
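
/*
 * Device-managed flow steering rules coming from a VF are only accepted
 * if they carry an Ethernet header with one of the VF's own MACs: ETH
 * rules are validated by validate_eth_header_mac(), and L3/L4-only rules
 * get an ETH header prepended by add_eth_header(), with in_modifier (the
 * rule size in dwords) grown accordingly.
 */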
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put;
		}
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox.\n");
		err = -EINVAL;
		goto err_put;
	}

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Fail to add flow steering resources.\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
		goto err_put;
	}
	atomic_inc(&rqp->ref_count);
err_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
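
/*
 * On detach, the rule is looked up first only to learn which QP owns it
 * (rrule->qpn) and is immediately released from busy state again,
 * presumably because rem_res_range() needs to claim the resource itself
 * before removing it from the tracker.
 */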
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;
	struct res_qp *rqp;
	struct res_fs_rule *rrule;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
	if (err)
		return err;
	/* Release the rule from busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
	if (err)
		return err;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Fail to remove flow steering resources.\n");
		goto out;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (!err)
		atomic_dec(&rqp->ref_count);
out:
	put_res(dev, slave, rrule->qpn, RES_QP);
	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};

int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}
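
/*
 * Flush any multicast attachments still recorded on the QP's mcg_list,
 * using the detach primitive of the active steering mode, before the QP
 * itself is torn down.
 */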
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
			break;
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
			break;
		}
		list_del(&rgid->list);
		kfree(rgid);
	}
}

static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy = 0;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave && !r->removing) {
			if (r->state == RES_ANY_BUSY) {
				if (print)
					mlx4_dbg(dev, "%s id 0x%llx is busy\n",
						 ResourceType(type), r->res_id);
				++busy;
			} else {
				r->from_state = r->state;
				r->state = RES_ANY_BUSY;
				r->removing = 1;
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}
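
/*
 * Repeatedly try to claim every resource of the given type owned by the
 * slave, giving commands in flight up to five seconds to release them;
 * whatever is still busy after that is reported via a final pass with
 * printing enabled.
 */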
static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
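
/*
 * All of the rem_slave_* cleanups below walk the same state machine:
 * each resource is unwound from com.from_state one step at a time
 * (e.g. HW -> mapped -> reserved), issuing the firmware command for each
 * step, until it reaches state 0 and can be erased from the tracker and
 * freed. The tracker lock is dropped around each iteration since the
 * steps may sleep.
 */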
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
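
/*
 * The teardown order below appears deliberate: flow steering rules and
 * QPs go first, so the references they hold on CQs, SRQs and MTTs are
 * dropped before those resources are reclaimed (rem_slave_cqs, for
 * instance, skips any CQ whose ref_count is still non-zero).
 */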
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}