1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
4 #include <linux/types.h>
5 #include <linux/module.h>
7 #include <linux/netdevice.h>
8 #include <linux/vmalloc.h>
9 #include <linux/string.h>
12 #include <linux/tcp.h>
13 #include <linux/ipv6.h>
14 #include <linux/if_bridge.h>
15 #ifdef NETIF_F_HW_VLAN_CTAG_TX
16 #include <linux/if_vlan.h>
#endif /* NETIF_F_HW_VLAN_CTAG_TX */
20 #include "ixgbe_type.h"
21 #include "ixgbe_sriov.h"
24 static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
27 struct ixgbe_hw *hw = &adapter->hw;
28 struct vf_macvlans *mv_list;
29 int num_vf_macvlans, i;
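	/* RAR entries that remain after reserving the PF macvlans, the
	 * PF MAC address and one MAC per VF can be handed out as VF
	 * macvlan filters below.
	 */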
31 num_vf_macvlans = hw->mac.num_rar_entries -
32 (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
36 mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
39 /* Initialize list of VF macvlans */
40 INIT_LIST_HEAD(&adapter->vf_mvs.l);
41 for (i = 0; i < num_vf_macvlans; i++) {
43 mv_list[i].free = true;
44 list_add(&mv_list[i].l, &adapter->vf_mvs.l);
46 adapter->mv_list = mv_list;
50 static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
53 struct ixgbe_hw *hw = &adapter->hw;
56 if (adapter->xdp_prog) {
57 e_warn(probe, "SRIOV is not supported with XDP\n");
61 /* Enable VMDq flag so device will be set in VM mode */
62 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
63 IXGBE_FLAG_VMDQ_ENABLED;
65 /* Allocate memory for per VF control structures */
66 adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
71 adapter->num_vfs = num_vfs;
73 ixgbe_alloc_vf_macvlans(adapter, num_vfs);
74 adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;
76 /* Initialize default switching mode VEB */
77 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
78 adapter->bridge_mode = BRIDGE_MODE_VEB;
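	/* VT_LBEN enables the internal Tx switch loopback that VEB bridge
	 * mode relies on to forward VF-to-VF traffic inside the device.
	 */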
80 /* limit traffic classes based on VFs enabled */
81 if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
82 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
83 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
84 } else if (num_vfs < 32) {
85 adapter->dcb_cfg.num_tcs.pg_tcs = 4;
86 adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
88 adapter->dcb_cfg.num_tcs.pg_tcs = 1;
89 adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
92 /* Disable RSC when in SR-IOV mode */
93 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
94 IXGBE_FLAG2_RSC_ENABLED);
96 for (i = 0; i < num_vfs; i++) {
97 /* enable spoof checking for all VFs */
98 adapter->vfinfo[i].spoofchk_enabled = true;
100 /* We support VF RSS querying only for 82599 and x540
101 * devices at the moment. These devices share RSS
102 * indirection table and RSS hash key with the PF, therefore
103 * we want to disable the querying by default.
105 adapter->vfinfo[i].rss_query_enabled = 0;
107 /* Untrust all VFs */
108 adapter->vfinfo[i].trusted = false;
110 /* set the default xcast mode */
111 adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
114 e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
119 * ixgbe_get_vfs - Find and take references to all vf devices
120 * @adapter: Pointer to adapter struct
122 static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
124 struct pci_dev *pdev = adapter->pdev;
125 u16 vendor = pdev->vendor;
126 struct pci_dev *vfdev;
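	/* Read the VF device ID from the PF's SR-IOV capability and walk all
	 * PCI functions with that ID; VFs whose physfn is this PF are
	 * recorded in vfinfo with a reference held, which is put again when
	 * SR-IOV is disabled.
	 */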
131 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
134 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
136 vfdev = pci_get_device(vendor, vf_id, NULL);
137 for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) {
138 if (!vfdev->is_virtfn)
140 if (vfdev->physfn != pdev)
142 if (vf >= adapter->num_vfs)
145 adapter->vfinfo[vf].vfdev = vfdev;
150 /* Note this function is called when the user wants to enable SR-IOV
151 * VFs using the now deprecated module parameter
153 void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
155 int pre_existing_vfs = 0;
156 unsigned int num_vfs;
158 pre_existing_vfs = pci_num_vf(adapter->pdev);
159 if (!pre_existing_vfs && !max_vfs)
162 /* If there are pre-existing VFs then we have to force
163 * use of that many - override any module parameter value.
164 * This may result from the user unloading the PF driver
165 * while VFs were assigned to guest VMs or because the VFs
166 * have been created via the new PCI SR-IOV sysfs interface.
168 if (pre_existing_vfs) {
169 num_vfs = pre_existing_vfs;
170 dev_warn(&adapter->pdev->dev,
171 "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
175 * The 82599 supports up to 64 VFs per physical function
176 * but this implementation limits allocation to 63 so that
177 * basic networking resources are still available to the
178 * physical function. If the user requests greater than
179 * 63 VFs then it is an error - reset to default of zero.
181 num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
183 err = pci_enable_sriov(adapter->pdev, num_vfs);
185 e_err(probe, "Failed to enable PCI sriov: %d\n", err);
190 if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
191 ixgbe_get_vfs(adapter);
195 /* If we have gotten to this point then there is no memory available
196 * to manage the VF devices - print message and bail.
198 e_err(probe, "Unable to allocate memory for VF Data Storage - "
200 ixgbe_disable_sriov(adapter);
203 #endif /* #ifdef CONFIG_PCI_IOV */
204 int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
206 unsigned int num_vfs = adapter->num_vfs, vf;
209 /* set num VFs to 0 to prevent access to vfinfo */
210 adapter->num_vfs = 0;
212 /* put the reference to all of the vf devices */
213 for (vf = 0; vf < num_vfs; ++vf) {
214 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
218 adapter->vfinfo[vf].vfdev = NULL;
222 /* free VF control structures */
223 kfree(adapter->vfinfo);
224 adapter->vfinfo = NULL;
226 /* free macvlan list */
227 kfree(adapter->mv_list);
228 adapter->mv_list = NULL;
230 /* if SR-IOV is already disabled then there is nothing to do */
231 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
234 #ifdef CONFIG_PCI_IOV
236 * If our VFs are assigned we cannot shut down SR-IOV
237 * without causing issues, so just leave the hardware
238 * available but disabled
240 if (pci_vfs_assigned(adapter->pdev)) {
241 e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
244 /* disable iov and allow time for transactions to clear */
245 pci_disable_sriov(adapter->pdev);
248 /* Disable VMDq flag so device will be set in VM mode */
249 if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) {
250 adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
251 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
252 rss = min_t(int, ixgbe_max_rss_indices(adapter),
255 rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
258 adapter->ring_feature[RING_F_VMDQ].offset = 0;
259 adapter->ring_feature[RING_F_RSS].limit = rss;
261 /* take a breather then clean up driver data */
266 static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
268 #ifdef CONFIG_PCI_IOV
269 struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
270 int pre_existing_vfs = pci_num_vf(dev);
271 int err = 0, num_rx_pools, i, limit;
274 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
275 err = ixgbe_disable_sriov(adapter);
276 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
282 /* While the SR-IOV capability structure reports total VFs to be 64,
283 * we limit the actual number allocated as below based on two factors.
288 * First, we reserve some transmit/receive resources for the PF.
289 * Second, VMDQ also uses the same pools that SR-IOV does. We need to
290 * account for this, so that we don't accidentally allocate more VFs
291 * than we have available pools. The PCI bus driver already checks for
292 * other values out of range.
294 num_tc = adapter->hw_tcs;
295 num_rx_pools = bitmap_weight(adapter->fwd_bitmask,
296 adapter->num_rx_pools);
297 limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
298 (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
300 if (num_vfs > (limit - num_rx_pools)) {
301 e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
302 num_tc, num_rx_pools - 1, limit - num_rx_pools);
306 err = __ixgbe_enable_sriov(adapter, num_vfs);
310 for (i = 0; i < num_vfs; i++)
311 ixgbe_vf_configuration(dev, (i | 0x10000000));
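	/* Bit 28 (0x10000000) of the event mask flags the VF as enabled;
	 * the low bits carry the VF number (see ixgbe_vf_configuration()).
	 */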
313 /* reset before enabling SRIOV to avoid mailbox issues */
314 ixgbe_sriov_reinit(adapter);
316 err = pci_enable_sriov(dev, num_vfs);
318 e_dev_warn("Failed to enable PCI sriov: %d\n", err);
321 ixgbe_get_vfs(adapter);
329 static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
331 struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
333 #ifdef CONFIG_PCI_IOV
334 u32 current_flags = adapter->flags;
335 int prev_num_vf = pci_num_vf(dev);
338 err = ixgbe_disable_sriov(adapter);
340 /* Only reinit if no error and state changed */
341 #ifdef CONFIG_PCI_IOV
342 if (!err && (current_flags != adapter->flags ||
343 prev_num_vf != pci_num_vf(dev)))
344 ixgbe_sriov_reinit(adapter);
350 int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
353 return ixgbe_pci_sriov_disable(dev);
355 return ixgbe_pci_sriov_enable(dev, num_vfs);
358 static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
361 int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
362 >> IXGBE_VT_MSGINFO_SHIFT;
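	/* msgbuf[0] carries the entry count in its MSGINFO field; the words
	 * starting at msgbuf[1] hold the multicast hash values, of which
	 * only the low 12 bits are used to index the MTA.
	 */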
363 u16 *hash_list = (u16 *)&msgbuf[1];
364 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
365 struct ixgbe_hw *hw = &adapter->hw;
370 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
372 /* only so many hash values supported */
373 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
376 * salt away the number of multicast addresses assigned
377 * to this VF for later use to restore when the PF multicast
380 vfinfo->num_vf_mc_hashes = entries;
383 * VFs are limited to using the MTA hash table for their multicast
386 for (i = 0; i < entries; i++) {
387 vfinfo->vf_mc_hashes[i] = hash_list[i];
390 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
391 vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
392 vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
393 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
394 mta_reg |= BIT(vector_bit);
395 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
397 vmolr |= IXGBE_VMOLR_ROMPE;
398 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
403 #ifdef CONFIG_PCI_IOV
404 void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
406 struct ixgbe_hw *hw = &adapter->hw;
407 struct vf_data_storage *vfinfo;
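	/* Walk every VF and re-program the shared MTA from the hash values
	 * saved at ixgbe_set_vf_multicasts() time, then set or clear ROMPE
	 * depending on whether the VF has any multicast entries.
	 */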
413 for (i = 0; i < adapter->num_vfs; i++) {
414 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
415 vfinfo = &adapter->vfinfo[i];
416 for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
417 hw->addr_ctrl.mta_in_use++;
418 vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
419 vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
420 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
421 mta_reg |= BIT(vector_bit);
422 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
425 if (vfinfo->num_vf_mc_hashes)
426 vmolr |= IXGBE_VMOLR_ROMPE;
428 vmolr &= ~IXGBE_VMOLR_ROMPE;
429 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
432 /* Restore any VF macvlans */
433 ixgbe_full_sync_mac_table(adapter);
437 static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
440 struct ixgbe_hw *hw = &adapter->hw;
443 /* If VLAN overlaps with one the PF is currently monitoring make
444 * sure that we are able to allocate a VLVF entry. This may be
445 * redundant but it guarantees PF will maintain visibility to
448 if (add && test_bit(vid, adapter->active_vlans)) {
449 err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false);
454 err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false);
459 /* If we failed to add the VF VLAN or we are removing the VF VLAN
460 * we may need to drop the PF pool bit in order to allow us to free
461 * up the VLVF resources.
463 if (test_bit(vid, adapter->active_vlans) ||
464 (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
465 ixgbe_update_pf_promisc_vlvf(adapter, vid);
470 static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
472 struct ixgbe_hw *hw = &adapter->hw;
473 int max_frame = msgbuf[1];
477 * For 82599EB we have to keep all PFs and VFs operating with
478 * the same max_frame value in order to avoid sending an oversize
479 * frame to a VF. In order to guarantee this is handled correctly
480 * for all cases we have several special exceptions to take into
481 * account before we can enable the VF for receive
483 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
484 struct net_device *dev = adapter->netdev;
485 int pf_max_frame = dev->mtu + ETH_HLEN;
486 u32 reg_offset, vf_shift, vfre;
490 if (dev->features & NETIF_F_FCOE_MTU)
491 pf_max_frame = max_t(int, pf_max_frame,
492 IXGBE_FCOE_JUMBO_FRAME_SIZE);
494 #endif /* CONFIG_FCOE */
495 switch (adapter->vfinfo[vf].vf_api) {
496 case ixgbe_mbox_api_11:
497 case ixgbe_mbox_api_12:
498 case ixgbe_mbox_api_13:
499 /* Version 1.1 supports jumbo frames on VFs if PF has
500 * jumbo frames enabled which means legacy VFs are
503 if (pf_max_frame > ETH_FRAME_LEN)
507 /* If the PF or VF are running w/ jumbo frames enabled
508 * we need to shut down the VF Rx path as we cannot
509 * support jumbo frames on legacy VFs
511 if ((pf_max_frame > ETH_FRAME_LEN) ||
512 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
517 /* determine VF receive enable location */
519 reg_offset = vf / 32;
521 /* enable or disable receive depending on error */
522 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
524 vfre &= ~BIT(vf_shift);
526 vfre |= BIT(vf_shift);
527 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
530 e_err(drv, "VF max_frame %d out of range\n", max_frame);
535 /* MTU < 68 is an error and causes problems on some kernels */
536 if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
537 e_err(drv, "VF max_frame %d out of range\n", max_frame);
541 /* pull current max frame size from hardware */
542 max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
543 max_frs &= IXGBE_MHADD_MFS_MASK;
544 max_frs >>= IXGBE_MHADD_MFS_SHIFT;
546 if (max_frs < max_frame) {
547 max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
548 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
551 e_info(hw, "VF requests change max MTU to %d\n", max_frame);
556 static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
558 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
559 vmolr |= IXGBE_VMOLR_BAM;
561 vmolr |= IXGBE_VMOLR_AUPE;
563 vmolr &= ~IXGBE_VMOLR_AUPE;
564 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
567 static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
569 struct ixgbe_hw *hw = &adapter->hw;
571 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
574 static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
576 struct ixgbe_hw *hw = &adapter->hw;
577 u32 vlvfb_mask, pool_mask, i;
579 /* create mask for VF and other pools */
580 pool_mask = ~BIT(VMDQ_P(0) % 32);
581 vlvfb_mask = BIT(vf % 32);
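	/* Each VLVF entry has two VLVFB registers forming a 64-bit pool
	 * bitmap; "word" below selects the half that contains this VF's bit.
	 */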
583 /* post-decrement loop, covers VLVF_ENTRIES - 1 to 0 */
584 for (i = IXGBE_VLVF_ENTRIES; i--;) {
585 u32 bits[2], vlvfb, vid, vfta, vlvf;
586 u32 word = i * 2 + vf / 32;
589 vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
591 /* if our bit isn't set we can skip it */
592 if (!(vlvfb & vlvfb_mask))
595 /* clear our bit from vlvfb */
598 /* create 64b mask to check whether we should clear VLVF */
599 bits[word % 2] = vlvfb;
600 bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));
602 /* if other pools are present, just remove ourselves */
603 if (bits[(VMDQ_P(0) / 32) ^ 1] ||
604 (bits[VMDQ_P(0) / 32] & pool_mask))
607 /* if PF is present, leave VFTA */
608 if (bits[0] || bits[1])
611 /* if we cannot determine VLAN just remove ourselves */
612 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
616 vid = vlvf & VLAN_VID_MASK;
617 mask = BIT(vid % 32);
619 /* clear bit from VFTA */
620 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
622 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask);
624 /* clear POOL selection enable */
625 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);
627 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
630 /* clear pool bits */
631 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
635 static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
636 int vf, int index, unsigned char *mac_addr)
638 struct vf_macvlans *entry;
639 struct list_head *pos;
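	/* index 0 means the VF asked to flush its macvlan filters; a
	 * non-zero index adds a filter using a free entry from the shared
	 * vf_mvs list set up in ixgbe_alloc_vf_macvlans().
	 */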
643 list_for_each(pos, &adapter->vf_mvs.l) {
644 entry = list_entry(pos, struct vf_macvlans, l);
645 if (entry->vf == vf) {
648 entry->is_macvlan = false;
649 ixgbe_del_mac_filter(adapter,
650 entry->vf_macvlan, vf);
656 * If index was zero then we were asked to clear the uc list
657 * for the VF. We're done.
664 list_for_each(pos, &adapter->vf_mvs.l) {
665 entry = list_entry(pos, struct vf_macvlans, l);
671 * If we traversed the entire list and didn't find a free entry
672 * then we're out of space on the RAR table. Also entry may
673 * be NULL because the original memory allocation for the list
674 * failed, which is not fatal but does mean we can't support
675 * VF requests for MACVLAN because we couldn't allocate
676 * memory for the list management required.
678 if (!entry || !entry->free)
681 retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
686 entry->is_macvlan = true;
688 memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
693 static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
695 struct ixgbe_hw *hw = &adapter->hw;
696 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
697 u8 num_tcs = adapter->hw_tcs;
699 /* remove VLAN filters belonging to this VF */
700 ixgbe_clear_vf_vlans(adapter, vf);
702 /* add back PF assigned VLAN or VLAN 0 */
703 ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
705 /* reset offloads to defaults */
706 ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
708 /* set outgoing tags for VFs */
709 if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
710 ixgbe_clear_vmvir(adapter, vf);
712 if (vfinfo->pf_qos || !num_tcs)
713 ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
716 ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
717 adapter->default_up, vf);
719 if (vfinfo->spoofchk_enabled)
720 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
723 /* reset multicast table array for vf */
724 adapter->vfinfo[vf].num_vf_mc_hashes = 0;
726 /* Flush and reset the mta with the new values */
727 ixgbe_set_rx_mode(adapter->netdev);
729 ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
730 ixgbe_set_vf_macvlan(adapter, vf, 0, NULL);
732 /* reset VF api back to unknown */
733 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
736 static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
737 int vf, unsigned char *mac_addr)
741 ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
742 retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
744 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
747 memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN);
752 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
754 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
755 unsigned int vfn = (event_mask & 0x3f);
757 bool enable = ((event_mask & 0x10000000U) != 0);
760 eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
765 static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
768 struct ixgbe_hw *hw = &adapter->hw;
769 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
770 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
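	/* q_per_pool is the number of queues in one VMDq pool, derived from
	 * the VMDq mask; the loop below programs QDE for every Rx queue
	 * owned by this VF.
	 */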
773 for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
776 /* flush previous write */
777 IXGBE_WRITE_FLUSH(hw);
779 /* indicate to hardware that we want to set drop enable */
780 reg = IXGBE_QDE_WRITE | qde;
781 reg |= i << IXGBE_QDE_IDX_SHIFT;
782 IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
786 static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
788 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
789 struct ixgbe_hw *hw = &adapter->hw;
790 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
791 u32 reg, reg_offset, vf_shift;
792 u32 msgbuf[4] = {0, 0, 0, 0};
793 u8 *addr = (u8 *)(&msgbuf[1]);
794 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
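	/* The reply sent at the end of this function is
	 * IXGBE_VF_PERMADDR_MSG_LEN words long: the reset opcode with
	 * ACK/NACK, the permanent MAC address and the multicast filter type.
	 */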
797 e_info(probe, "VF Reset msg received from vf %d\n", vf);
799 /* reset the filters for the device */
800 ixgbe_vf_reset_event(adapter, vf);
802 /* set vf mac address */
803 if (!is_zero_ether_addr(vf_mac))
804 ixgbe_set_vf_mac(adapter, vf, vf_mac);
807 reg_offset = vf / 32;
809 /* enable transmit for vf */
810 reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
811 reg |= BIT(vf_shift);
812 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
814 /* force drop enable for all VF Rx queues */
815 reg = IXGBE_QDE_ENABLE;
816 if (adapter->vfinfo[vf].pf_vlan)
817 reg |= IXGBE_QDE_HIDE_VLAN;
819 ixgbe_write_qde(adapter, vf, reg);
821 /* enable receive for vf */
822 reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
823 reg |= BIT(vf_shift);
825 * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
826 * For more info take a look at ixgbe_set_vf_lpe
828 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
829 struct net_device *dev = adapter->netdev;
830 int pf_max_frame = dev->mtu + ETH_HLEN;
833 if (dev->features & NETIF_F_FCOE_MTU)
834 pf_max_frame = max_t(int, pf_max_frame,
835 IXGBE_FCOE_JUMBO_FRAME_SIZE);
837 #endif /* CONFIG_FCOE */
838 if (pf_max_frame > ETH_FRAME_LEN)
839 reg &= ~BIT(vf_shift);
841 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
843 /* enable VF mailbox for further messages */
844 adapter->vfinfo[vf].clear_to_send = true;
846 /* Enable counting of spoofed packets in the SSVPC register */
847 reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
848 reg |= BIT(vf_shift);
849 IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
852 * Reset the VF's TDWBAL and TDWBAH registers,
853 * which are not cleared by an FLR
855 for (i = 0; i < q_per_pool; i++) {
856 IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
857 IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
860 /* reply to reset with ack and vf mac address */
861 msgbuf[0] = IXGBE_VF_RESET;
862 if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) {
863 msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
864 memcpy(addr, vf_mac, ETH_ALEN);
866 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
870 * Piggyback the multicast filter type so VF can compute the
873 msgbuf[3] = hw->mac.mc_filter_type;
874 ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
879 static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
882 u8 *new_mac = ((u8 *)(&msgbuf[1]));
884 if (!is_valid_ether_addr(new_mac)) {
885 e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
889 if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
890 !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
892 "VF %d attempted to override administratively set MAC address\n"
893 "Reload the VF driver to resume operations\n",
898 return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
901 static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
904 u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
905 u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
906 u8 tcs = adapter->hw_tcs;
908 if (adapter->vfinfo[vf].pf_vlan || tcs) {
910 "VF %d attempted to override administratively set VLAN configuration\n"
911 "Reload the VF driver to resume operations\n",
916 /* VLAN 0 is a special case, don't allow it to be removed */
920 return ixgbe_set_vf_vlan(adapter, add, vid, vf);
923 static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
926 u8 *new_mac = ((u8 *)(&msgbuf[1]));
927 int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
928 IXGBE_VT_MSGINFO_SHIFT;
931 if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
934 "VF %d requested MACVLAN filter but is administratively denied\n",
939 /* A non-zero index indicates the VF is setting a filter */
941 if (!is_valid_ether_addr(new_mac)) {
942 e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
947 * If the VF is allowed to set MAC filters then turn off
948 * anti-spoofing to avoid false positives.
950 if (adapter->vfinfo[vf].spoofchk_enabled) {
951 struct ixgbe_hw *hw = &adapter->hw;
953 hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
954 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
958 err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
961 "VF %d has requested a MACVLAN filter but there is no space for it\n",
967 static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
973 case ixgbe_mbox_api_10:
974 case ixgbe_mbox_api_11:
975 case ixgbe_mbox_api_12:
976 case ixgbe_mbox_api_13:
977 adapter->vfinfo[vf].vf_api = api;
983 e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
988 static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
991 struct net_device *dev = adapter->netdev;
992 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
993 unsigned int default_tc = 0;
994 u8 num_tcs = adapter->hw_tcs;
996 /* verify the PF is supporting the correct APIs */
997 switch (adapter->vfinfo[vf].vf_api) {
998 case ixgbe_mbox_api_20:
999 case ixgbe_mbox_api_11:
1000 case ixgbe_mbox_api_12:
1001 case ixgbe_mbox_api_13:
1007 /* only allow 1 Tx queue for bandwidth limiting */
1008 msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
1009 msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
1011 /* if TCs > 1 determine which TC belongs to default user priority */
1013 default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
1015 /* notify VF of need for VLAN tag stripping, and correct queue */
1017 msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
1018 else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
1019 msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
1021 msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
1023 /* notify VF of default queue */
1024 msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
1029 static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
1032 u32 *out_buf = &msgbuf[1];
1033 const u8 *reta = adapter->rss_indir_tbl;
1034 u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter);
1036 /* Check if operation is permitted */
1037 if (!adapter->vfinfo[vf].rss_query_enabled)
1040 /* verify the PF is supporting the correct API */
1041 switch (adapter->vfinfo[vf].vf_api) {
1042 case ixgbe_mbox_api_13:
1043 case ixgbe_mbox_api_12:
1049 /* This mailbox command is supported (required) only for 82599 and x540
1050 * VFs which support up to 4 RSS queues. Therefore we will compress the
1051 * RETA by saving only 2 bits from each entry. This way we will be able
1052 * to transfer the whole RETA in a single mailbox operation.
1054 for (i = 0; i < reta_size / 16; i++) {
1056 for (j = 0; j < 16; j++)
1057 out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j);
1063 static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
1064 u32 *msgbuf, u32 vf)
1066 u32 *rss_key = &msgbuf[1];
1068 /* Check if the operation is permitted */
1069 if (!adapter->vfinfo[vf].rss_query_enabled)
1072 /* verify the PF is supporting the correct API */
1073 switch (adapter->vfinfo[vf].vf_api) {
1074 case ixgbe_mbox_api_13:
1075 case ixgbe_mbox_api_12:
1081 memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE);
1086 static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
1087 u32 *msgbuf, u32 vf)
1089 struct ixgbe_hw *hw = &adapter->hw;
1090 int xcast_mode = msgbuf[1];
1091 u32 vmolr, fctrl, disable, enable;
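	/* Each xcast mode maps to a set of VMOLR bits to set and clear:
	 * NONE < MULTI < ALLMULTI < PROMISC. Untrusted VFs are capped at
	 * MULTI, and PROMISC additionally requires the PF itself to be in
	 * promiscuous mode.
	 */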
1093 /* verify the PF is supporting the correct APIs */
1094 switch (adapter->vfinfo[vf].vf_api) {
1095 case ixgbe_mbox_api_12:
1096 /* promisc was introduced in API version 1.3 */
1097 if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
1100 case ixgbe_mbox_api_13:
1106 if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
1107 !adapter->vfinfo[vf].trusted) {
1108 xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
1111 if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
1114 switch (xcast_mode) {
1115 case IXGBEVF_XCAST_MODE_NONE:
1116 disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
1117 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
1120 case IXGBEVF_XCAST_MODE_MULTI:
1121 disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
1122 enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
1124 case IXGBEVF_XCAST_MODE_ALLMULTI:
1125 disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
1126 enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
1128 case IXGBEVF_XCAST_MODE_PROMISC:
1129 if (hw->mac.type <= ixgbe_mac_82599EB)
1132 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1133 if (!(fctrl & IXGBE_FCTRL_UPE)) {
1134 /* VF promisc requires PF in promisc */
1136 "Enabling VF promisc requires PF in promisc\n");
1141 enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
1142 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
1148 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
1151 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
1153 adapter->vfinfo[vf].xcast_mode = xcast_mode;
1156 msgbuf[1] = xcast_mode;
1161 static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
1163 u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
1164 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
1165 struct ixgbe_hw *hw = &adapter->hw;
1168 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
1171 pr_err("Error receiving message from VF\n");
1175 /* this is a message we already processed, do nothing */
1176 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
1179 /* flush the ack before we write any messages back */
1180 IXGBE_WRITE_FLUSH(hw);
1182 if (msgbuf[0] == IXGBE_VF_RESET)
1183 return ixgbe_vf_reset_msg(adapter, vf);
1186 * until the vf completes a virtual function reset it should not be
1187 * allowed to start any configuration.
1189 if (!adapter->vfinfo[vf].clear_to_send) {
1190 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
1191 ixgbe_write_mbx(hw, msgbuf, 1, vf);
1195 switch ((msgbuf[0] & 0xFFFF)) {
1196 case IXGBE_VF_SET_MAC_ADDR:
1197 retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
1199 case IXGBE_VF_SET_MULTICAST:
1200 retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
1202 case IXGBE_VF_SET_VLAN:
1203 retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
1205 case IXGBE_VF_SET_LPE:
1206 retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
1208 case IXGBE_VF_SET_MACVLAN:
1209 retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
1211 case IXGBE_VF_API_NEGOTIATE:
1212 retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
1214 case IXGBE_VF_GET_QUEUES:
1215 retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
1217 case IXGBE_VF_GET_RETA:
1218 retval = ixgbe_get_vf_reta(adapter, msgbuf, vf);
1220 case IXGBE_VF_GET_RSS_KEY:
1221 retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
1223 case IXGBE_VF_UPDATE_XCAST_MODE:
1224 retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
1227 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
1228 retval = IXGBE_ERR_MBX;
1232 /* notify the VF of the results of what it sent us */
1234 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
1236 msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
1238 msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
1240 ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
1245 static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
1247 struct ixgbe_hw *hw = &adapter->hw;
1248 u32 msg = IXGBE_VT_MSGTYPE_NACK;
1250 /* if device isn't clear to send it shouldn't be reading either */
1251 if (!adapter->vfinfo[vf].clear_to_send)
1252 ixgbe_write_mbx(hw, &msg, 1, vf);
1255 void ixgbe_msg_task(struct ixgbe_adapter *adapter)
1257 struct ixgbe_hw *hw = &adapter->hw;
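	/* Poll each VF's mailbox: handle reset requests first, then any
	 * pending message, then any outstanding ack.
	 */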
1260 for (vf = 0; vf < adapter->num_vfs; vf++) {
1261 /* process any reset requests */
1262 if (!ixgbe_check_for_rst(hw, vf))
1263 ixgbe_vf_reset_event(adapter, vf);
1265 /* process any messages pending */
1266 if (!ixgbe_check_for_msg(hw, vf))
1267 ixgbe_rcv_msg_from_vf(adapter, vf);
1269 /* process any acks */
1270 if (!ixgbe_check_for_ack(hw, vf))
1271 ixgbe_rcv_ack_from_vf(adapter, vf);
1275 void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
1277 struct ixgbe_hw *hw = &adapter->hw;
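	/* VFTE/VFRE registers 0 and 1 each hold 32 pool enable bits,
	 * covering VF pools 0-31 and 32-63 respectively.
	 */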
1279 /* disable transmit and receive for all vfs */
1280 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
1281 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
1283 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
1284 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
1287 static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
1289 struct ixgbe_hw *hw = &adapter->hw;
1292 ping = IXGBE_PF_CONTROL_MSG;
1293 if (adapter->vfinfo[vf].clear_to_send)
1294 ping |= IXGBE_VT_MSGTYPE_CTS;
1295 ixgbe_write_mbx(hw, &ping, 1, vf);
1298 void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
1300 struct ixgbe_hw *hw = &adapter->hw;
1304 for (i = 0 ; i < adapter->num_vfs; i++) {
1305 ping = IXGBE_PF_CONTROL_MSG;
1306 if (adapter->vfinfo[i].clear_to_send)
1307 ping |= IXGBE_VT_MSGTYPE_CTS;
1308 ixgbe_write_mbx(hw, &ping, 1, i);
1312 int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1314 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1317 if (vf >= adapter->num_vfs)
1320 if (is_valid_ether_addr(mac)) {
1321 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
1323 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.");
1325 retval = ixgbe_set_vf_mac(adapter, vf, mac);
1327 adapter->vfinfo[vf].pf_set_mac = true;
1329 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1330 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
1331 dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
1334 dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n");
1336 } else if (is_zero_ether_addr(mac)) {
1337 unsigned char *vf_mac_addr =
1338 adapter->vfinfo[vf].vf_mac_addresses;
1341 if (is_zero_ether_addr(vf_mac_addr))
1344 dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);
1346 retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf);
1348 adapter->vfinfo[vf].pf_set_mac = false;
1349 memcpy(vf_mac_addr, mac, ETH_ALEN);
1351 dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n");
1360 static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
1363 struct ixgbe_hw *hw = &adapter->hw;
1366 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
1370 /* Revoke tagless access via VLAN 0 */
1371 ixgbe_set_vf_vlan(adapter, false, 0, vf);
1373 ixgbe_set_vmvir(adapter, vlan, qos, vf);
1374 ixgbe_set_vmolr(hw, vf, false);
1376 /* enable hide vlan on X550 */
1377 if (hw->mac.type >= ixgbe_mac_X550)
1378 ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
1379 IXGBE_QDE_HIDE_VLAN);
1381 adapter->vfinfo[vf].pf_vlan = vlan;
1382 adapter->vfinfo[vf].pf_qos = qos;
1383 dev_info(&adapter->pdev->dev,
1384 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
1385 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1386 dev_warn(&adapter->pdev->dev,
1387 "The VF VLAN has been set, but the PF device is not up.\n");
1388 dev_warn(&adapter->pdev->dev,
1389 "Bring the PF device up before attempting to use the VF device.\n");
1396 static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
1398 struct ixgbe_hw *hw = &adapter->hw;
1401 err = ixgbe_set_vf_vlan(adapter, false,
1402 adapter->vfinfo[vf].pf_vlan, vf);
1403 /* Restore tagless access via VLAN 0 */
1404 ixgbe_set_vf_vlan(adapter, true, 0, vf);
1405 ixgbe_clear_vmvir(adapter, vf);
1406 ixgbe_set_vmolr(hw, vf, true);
1408 /* disable hide VLAN on X550 */
1409 if (hw->mac.type >= ixgbe_mac_X550)
1410 ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
1412 adapter->vfinfo[vf].pf_vlan = 0;
1413 adapter->vfinfo[vf].pf_qos = 0;
1418 int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1419 u8 qos, __be16 vlan_proto)
1422 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1424 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
1426 if (vlan_proto != htons(ETH_P_8021Q))
1427 return -EPROTONOSUPPORT;
1429 /* Check if there is already a port VLAN set, if so
1430 * we have to delete the old one first before we
1431 * can set the new one. The usage model had
1432 * previously assumed the user would delete the
1433 * old port VLAN before setting a new one but this
1434 * is not necessarily the case.
1436 if (adapter->vfinfo[vf].pf_vlan)
1437 err = ixgbe_disable_port_vlan(adapter, vf);
1440 err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
1442 err = ixgbe_disable_port_vlan(adapter, vf);
1449 int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
1451 switch (adapter->link_speed) {
1452 case IXGBE_LINK_SPEED_100_FULL:
1454 case IXGBE_LINK_SPEED_1GB_FULL:
1456 case IXGBE_LINK_SPEED_10GB_FULL:
1463 static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
1465 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
1466 struct ixgbe_hw *hw = &adapter->hw;
1468 u16 queue, queues_per_pool;
1469 u16 tx_rate = adapter->vfinfo[vf].tx_rate;
1472 /* start with base link speed value */
1473 bcnrc_val = adapter->vf_rate_link_speed;
1475 /* Calculate the rate factor values to set */
1476 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1477 bcnrc_val /= tx_rate;
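	/* bcnrc_val now holds the link-speed / tx_rate ratio as a
	 * fixed-point rate factor (integer part above RF_INT_SHIFT).
	 */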
1479 /* clear everything but the rate factor */
1480 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1481 IXGBE_RTTBCNRC_RF_DEC_MASK;
1483 /* enable the rate scheduler */
1484 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1488 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
1489 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
1490 * and 0x004 otherwise.
1492 switch (hw->mac.type) {
1493 case ixgbe_mac_82599EB:
1494 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
1496 case ixgbe_mac_X540:
1497 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
1503 /* determine how many queues per pool based on VMDq mask */
1504 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
1506 /* write value for all Tx queues belonging to VF */
1507 for (queue = 0; queue < queues_per_pool; queue++) {
1508 unsigned int reg_idx = (vf * queues_per_pool) + queue;
1510 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
1511 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1515 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
1519 /* VF Tx rate limit was not set */
1520 if (!adapter->vf_rate_link_speed)
1523 if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
1524 adapter->vf_rate_link_speed = 0;
1525 dev_info(&adapter->pdev->dev,
1526 "Link speed has been changed. VF Transmit rate is disabled\n");
1529 for (i = 0; i < adapter->num_vfs; i++) {
1530 if (!adapter->vf_rate_link_speed)
1531 adapter->vfinfo[i].tx_rate = 0;
1533 ixgbe_set_vf_rate_limit(adapter, i);
1537 int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
1540 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1543 /* verify VF is active */
1544 if (vf >= adapter->num_vfs)
1547 /* verify link is up */
1548 if (!adapter->link_up)
1551 /* verify we are linked at 10Gbps */
1552 link_speed = ixgbe_link_mbps(adapter);
1553 if (link_speed != 10000)
1559 /* rate limit cannot be less than 10 Mbps or greater than link speed */
1560 if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
1564 adapter->vf_rate_link_speed = link_speed;
1565 adapter->vfinfo[vf].tx_rate = max_tx_rate;
1567 /* update hardware configuration */
1568 ixgbe_set_vf_rate_limit(adapter, vf);
1573 int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
1575 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1576 struct ixgbe_hw *hw = &adapter->hw;
1578 if (vf >= adapter->num_vfs)
1581 adapter->vfinfo[vf].spoofchk_enabled = setting;
1583 /* configure MAC spoofing */
1584 hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
1586 /* configure VLAN spoofing */
1587 hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
1589 /* Ensure LLDP and FC are set for Ethertype antispoofing if we will be
1590 * calling set_ethertype_anti_spoofing for this VF below
1592 if (hw->mac.ops.set_ethertype_anti_spoofing) {
1593 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
1594 (IXGBE_ETQF_FILTER_EN |
1595 IXGBE_ETQF_TX_ANTISPOOF |
1598 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
1599 (IXGBE_ETQF_FILTER_EN |
1600 IXGBE_ETQF_TX_ANTISPOOF |
1603 hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
1609 int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
1612 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1614 /* This operation is currently supported only for 82599 and x540
1617 if (adapter->hw.mac.type < ixgbe_mac_82599EB ||
1618 adapter->hw.mac.type >= ixgbe_mac_X550)
1621 if (vf >= adapter->num_vfs)
1624 adapter->vfinfo[vf].rss_query_enabled = setting;
1629 int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
1631 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1633 if (vf >= adapter->num_vfs)
1637 if (adapter->vfinfo[vf].trusted == setting)
1640 adapter->vfinfo[vf].trusted = setting;
1642 /* reset VF to reconfigure features */
1643 adapter->vfinfo[vf].clear_to_send = false;
1644 ixgbe_ping_vf(adapter, vf);
1646 e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
1651 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
1652 int vf, struct ifla_vf_info *ivi)
1654 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1655 if (vf >= adapter->num_vfs)
1658 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
1659 ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
1660 ivi->min_tx_rate = 0;
1661 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
1662 ivi->qos = adapter->vfinfo[vf].pf_qos;
1663 ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
1664 ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
1665 ivi->trusted = adapter->vfinfo[vf].trusted;