net: hns3: refactor for function hclge_fd_convert_tuple
[linux-2.6-microblaze.git] / drivers / net / ethernet / hisilicon / hns3 / hns3pf / hclge_main.c
index 34b744d..3d601c9 100644 (file)
@@ -384,36 +384,56 @@ static const struct key_info meta_data_key_info[] = {
 };
 
 static const struct key_info tuple_key_info[] = {
-       { OUTER_DST_MAC, 48},
-       { OUTER_SRC_MAC, 48},
-       { OUTER_VLAN_TAG_FST, 16},
-       { OUTER_VLAN_TAG_SEC, 16},
-       { OUTER_ETH_TYPE, 16},
-       { OUTER_L2_RSV, 16},
-       { OUTER_IP_TOS, 8},
-       { OUTER_IP_PROTO, 8},
-       { OUTER_SRC_IP, 32},
-       { OUTER_DST_IP, 32},
-       { OUTER_L3_RSV, 16},
-       { OUTER_SRC_PORT, 16},
-       { OUTER_DST_PORT, 16},
-       { OUTER_L4_RSV, 32},
-       { OUTER_TUN_VNI, 24},
-       { OUTER_TUN_FLOW_ID, 8},
-       { INNER_DST_MAC, 48},
-       { INNER_SRC_MAC, 48},
-       { INNER_VLAN_TAG_FST, 16},
-       { INNER_VLAN_TAG_SEC, 16},
-       { INNER_ETH_TYPE, 16},
-       { INNER_L2_RSV, 16},
-       { INNER_IP_TOS, 8},
-       { INNER_IP_PROTO, 8},
-       { INNER_SRC_IP, 32},
-       { INNER_DST_IP, 32},
-       { INNER_L3_RSV, 16},
-       { INNER_SRC_PORT, 16},
-       { INNER_DST_PORT, 16},
-       { INNER_L4_RSV, 32},
+       { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
+       { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
+       { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
+       { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
+       { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
+       { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
+       { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
+       { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
+       { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
+       { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
+       { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
+       { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
+       { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
+       { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
+       { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
+       { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
+       { INNER_DST_MAC, 48, KEY_OPT_MAC,
+         offsetof(struct hclge_fd_rule, tuples.dst_mac),
+         offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
+       { INNER_SRC_MAC, 48, KEY_OPT_MAC,
+         offsetof(struct hclge_fd_rule, tuples.src_mac),
+         offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
+       { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
+         offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
+         offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
+       { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
+       { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
+         offsetof(struct hclge_fd_rule, tuples.ether_proto),
+         offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
+       { INNER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
+       { INNER_IP_TOS, 8, KEY_OPT_U8,
+         offsetof(struct hclge_fd_rule, tuples.ip_tos),
+         offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
+       { INNER_IP_PROTO, 8, KEY_OPT_U8,
+         offsetof(struct hclge_fd_rule, tuples.ip_proto),
+         offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
+       { INNER_SRC_IP, 32, KEY_OPT_IP,
+         offsetof(struct hclge_fd_rule, tuples.src_ip),
+         offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
+       { INNER_DST_IP, 32, KEY_OPT_IP,
+         offsetof(struct hclge_fd_rule, tuples.dst_ip),
+         offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
+       { INNER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
+       { INNER_SRC_PORT, 16, KEY_OPT_LE16,
+         offsetof(struct hclge_fd_rule, tuples.src_port),
+         offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
+       { INNER_DST_PORT, 16, KEY_OPT_LE16,
+         offsetof(struct hclge_fd_rule, tuples.dst_port),
+         offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
+       { INNER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
 };
 
 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
@@ -751,8 +771,9 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
                handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
                handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
 
-               if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
-                   hdev->hw.mac.phydev->drv->set_loopback) {
+               if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+                    hdev->hw.mac.phydev->drv->set_loopback) ||
+                   hnae3_dev_phy_imp_supported(hdev)) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
                }
@@ -1150,8 +1171,10 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
        if (hnae3_dev_fec_supported(hdev))
                hclge_convert_setting_fec(mac);
 
+       if (hnae3_dev_pause_supported(hdev))
+               linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+
        linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
-       linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
 }
 
@@ -1163,8 +1186,11 @@ static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
        hclge_convert_setting_kr(mac, speed_ability);
        if (hnae3_dev_fec_supported(hdev))
                hclge_convert_setting_fec(mac);
+
+       if (hnae3_dev_pause_supported(hdev))
+               linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+
        linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
-       linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
 }
 
@@ -1193,10 +1219,13 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
        }
 
+       if (hnae3_dev_pause_supported(hdev)) {
+               linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+               linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
+       }
+
        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
-       linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
-       linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
 }
 
 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
@@ -2889,10 +2918,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
        clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
 }
 
-static void hclge_update_port_capability(struct hclge_mac *mac)
+static void hclge_update_port_capability(struct hclge_dev *hdev,
+                                        struct hclge_mac *mac)
 {
-       /* update fec ability by speed */
-       hclge_convert_setting_fec(mac);
+       if (hnae3_dev_fec_supported(hdev))
+               /* update fec ability by speed */
+               hclge_convert_setting_fec(mac);
 
        /* firmware can not identify back plane type, the media type
         * read from configuration can help deal it
@@ -2984,6 +3015,141 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
        return 0;
 }
 
+static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
+                                       struct ethtool_link_ksettings *cmd)
+{
+       struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_phy_link_ksetting_0_cmd *req0;
+       struct hclge_phy_link_ksetting_1_cmd *req1;
+       u32 supported, advertising, lp_advertising;
+       struct hclge_dev *hdev = vport->back;
+       int ret;
+
+       hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
+                                  true);
+       desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+       hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
+                                  true);
+
+       ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "failed to get phy link ksetting, ret = %d.\n", ret);
+               return ret;
+       }
+
+       req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
+       cmd->base.autoneg = req0->autoneg;
+       cmd->base.speed = le32_to_cpu(req0->speed);
+       cmd->base.duplex = req0->duplex;
+       cmd->base.port = req0->port;
+       cmd->base.transceiver = req0->transceiver;
+       cmd->base.phy_address = req0->phy_address;
+       cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
+       cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
+       supported = le32_to_cpu(req0->supported);
+       advertising = le32_to_cpu(req0->advertising);
+       lp_advertising = le32_to_cpu(req0->lp_advertising);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+                                               lp_advertising);
+
+       req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
+       cmd->base.master_slave_cfg = req1->master_slave_cfg;
+       cmd->base.master_slave_state = req1->master_slave_state;
+
+       return 0;
+}
+
+static int
+hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
+                            const struct ethtool_link_ksettings *cmd)
+{
+       struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_phy_link_ksetting_0_cmd *req0;
+       struct hclge_phy_link_ksetting_1_cmd *req1;
+       struct hclge_dev *hdev = vport->back;
+       u32 advertising;
+       int ret;
+
+       if (cmd->base.autoneg == AUTONEG_DISABLE &&
+           ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
+            (cmd->base.duplex != DUPLEX_HALF &&
+             cmd->base.duplex != DUPLEX_FULL)))
+               return -EINVAL;
+
+       hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
+                                  false);
+       desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+       hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
+                                  false);
+
+       req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
+       req0->autoneg = cmd->base.autoneg;
+       req0->speed = cpu_to_le32(cmd->base.speed);
+       req0->duplex = cmd->base.duplex;
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
+       req0->advertising = cpu_to_le32(advertising);
+       req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+
+       req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
+       req1->master_slave_cfg = cmd->base.master_slave_cfg;
+
+       ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "failed to set phy link ksettings, ret = %d.\n", ret);
+               return ret;
+       }
+
+       hdev->hw.mac.autoneg = cmd->base.autoneg;
+       hdev->hw.mac.speed = cmd->base.speed;
+       hdev->hw.mac.duplex = cmd->base.duplex;
+       linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
+
+       return 0;
+}
+
+static int hclge_update_tp_port_info(struct hclge_dev *hdev)
+{
+       struct ethtool_link_ksettings cmd;
+       int ret;
+
+       if (!hnae3_dev_phy_imp_supported(hdev))
+               return 0;
+
+       ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
+       if (ret)
+               return ret;
+
+       hdev->hw.mac.autoneg = cmd.base.autoneg;
+       hdev->hw.mac.speed = cmd.base.speed;
+       hdev->hw.mac.duplex = cmd.base.duplex;
+
+       return 0;
+}
+
+static int hclge_tp_port_init(struct hclge_dev *hdev)
+{
+       struct ethtool_link_ksettings cmd;
+
+       if (!hnae3_dev_phy_imp_supported(hdev))
+               return 0;
+
+       cmd.base.autoneg = hdev->hw.mac.autoneg;
+       cmd.base.speed = hdev->hw.mac.speed;
+       cmd.base.duplex = hdev->hw.mac.duplex;
+       linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
+
+       return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
+}
+
 static int hclge_update_port_info(struct hclge_dev *hdev)
 {
        struct hclge_mac *mac = &hdev->hw.mac;
@@ -2992,7 +3158,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
 
        /* get the port info from SFP cmd if not copper port */
        if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
-               return 0;
+               return hclge_update_tp_port_info(hdev);
 
        /* if IMP does not support get SFP/qSFP info, return directly */
        if (!hdev->support_sfp_query)
@@ -3012,7 +3178,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
 
        if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
                if (mac->speed_type == QUERY_ACTIVE_SPEED) {
-                       hclge_update_port_capability(mac);
+                       hclge_update_port_capability(hdev, mac);
                        return 0;
                }
                return hclge_cfg_mac_speed_dup(hdev, mac->speed,
@@ -5225,96 +5391,57 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
                                   struct hclge_fd_rule *rule)
 {
+       int offset, moffset, ip_offset;
+       enum HCLGE_FD_KEY_OPT key_opt;
        u16 tmp_x_s, tmp_y_s;
        u32 tmp_x_l, tmp_y_l;
+       u8 *p = (u8 *)rule;
        int i;
 
-       if (rule->unused_tuple & tuple_bit)
+       if (rule->unused_tuple & BIT(tuple_bit))
                return true;
 
-       switch (tuple_bit) {
-       case BIT(INNER_DST_MAC):
-               for (i = 0; i < ETH_ALEN; i++) {
-                       calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
-                              rule->tuples_mask.dst_mac[i]);
-                       calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
-                              rule->tuples_mask.dst_mac[i]);
-               }
-
-               return true;
-       case BIT(INNER_SRC_MAC):
-               for (i = 0; i < ETH_ALEN; i++) {
-                       calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
-                              rule->tuples.src_mac[i]);
-                       calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
-                              rule->tuples.src_mac[i]);
-               }
+       key_opt = tuple_key_info[tuple_bit].key_opt;
+       offset = tuple_key_info[tuple_bit].offset;
+       moffset = tuple_key_info[tuple_bit].moffset;
 
-               return true;
-       case BIT(INNER_VLAN_TAG_FST):
-               calc_x(tmp_x_s, rule->tuples.vlan_tag1,
-                      rule->tuples_mask.vlan_tag1);
-               calc_y(tmp_y_s, rule->tuples.vlan_tag1,
-                      rule->tuples_mask.vlan_tag1);
-               *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
-               *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+       switch (key_opt) {
+       case KEY_OPT_U8:
+               calc_x(*key_x, p[offset], p[moffset]);
+               calc_y(*key_y, p[offset], p[moffset]);
 
                return true;
-       case BIT(INNER_ETH_TYPE):
-               calc_x(tmp_x_s, rule->tuples.ether_proto,
-                      rule->tuples_mask.ether_proto);
-               calc_y(tmp_y_s, rule->tuples.ether_proto,
-                      rule->tuples_mask.ether_proto);
+       case KEY_OPT_LE16:
+               calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
+               calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
                *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
                *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
 
                return true;
-       case BIT(INNER_IP_TOS):
-               calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
-               calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
-
-               return true;
-       case BIT(INNER_IP_PROTO):
-               calc_x(*key_x, rule->tuples.ip_proto,
-                      rule->tuples_mask.ip_proto);
-               calc_y(*key_y, rule->tuples.ip_proto,
-                      rule->tuples_mask.ip_proto);
-
-               return true;
-       case BIT(INNER_SRC_IP):
-               calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
-                      rule->tuples_mask.src_ip[IPV4_INDEX]);
-               calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
-                      rule->tuples_mask.src_ip[IPV4_INDEX]);
-               *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
-               *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
-
-               return true;
-       case BIT(INNER_DST_IP):
-               calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
-                      rule->tuples_mask.dst_ip[IPV4_INDEX]);
-               calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
-                      rule->tuples_mask.dst_ip[IPV4_INDEX]);
+       case KEY_OPT_LE32:
+               calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
+               calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
                *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
                *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
 
                return true;
-       case BIT(INNER_SRC_PORT):
-               calc_x(tmp_x_s, rule->tuples.src_port,
-                      rule->tuples_mask.src_port);
-               calc_y(tmp_y_s, rule->tuples.src_port,
-                      rule->tuples_mask.src_port);
-               *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
-               *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+       case KEY_OPT_MAC:
+               for (i = 0; i < ETH_ALEN; i++) {
+                       calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
+                              p[moffset + i]);
+                       calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
+                              p[moffset + i]);
+               }
 
                return true;
-       case BIT(INNER_DST_PORT):
-               calc_x(tmp_x_s, rule->tuples.dst_port,
-                      rule->tuples_mask.dst_port);
-               calc_y(tmp_y_s, rule->tuples.dst_port,
-                      rule->tuples_mask.dst_port);
-               *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
-               *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+       case KEY_OPT_IP:
+               ip_offset = IPV4_INDEX * sizeof(u32);
+               calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
+                      *(u32 *)(&p[moffset + ip_offset]));
+               calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
+                      *(u32 *)(&p[moffset + ip_offset]));
+               *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+               *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
 
                return true;
        default:
@@ -5402,12 +5529,12 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
 
        for (i = 0 ; i < MAX_TUPLE; i++) {
                bool tuple_valid;
-               u32 check_tuple;
 
                tuple_size = tuple_key_info[i].key_length / 8;
-               check_tuple = key_cfg->tuple_active & BIT(i);
+               if (!(key_cfg->tuple_active & BIT(i)))
+                       continue;
 
-               tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
+               tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
                                                     cur_key_y, rule);
                if (tuple_valid) {
                        cur_key_x += tuple_size;
@@ -5789,144 +5916,158 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
        return 0;
 }
 
-static int hclge_fd_get_tuple(struct hclge_dev *hdev,
-                             struct ethtool_rx_flow_spec *fs,
-                             struct hclge_fd_rule *rule)
+static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
+                                     struct ethtool_rx_flow_spec *fs,
+                                     struct hclge_fd_rule *rule, u8 ip_proto)
 {
-       u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+       rule->tuples.src_ip[IPV4_INDEX] =
+                       be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
+       rule->tuples_mask.src_ip[IPV4_INDEX] =
+                       be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
 
-       switch (flow_type) {
-       case SCTP_V4_FLOW:
-       case TCP_V4_FLOW:
-       case UDP_V4_FLOW:
-               rule->tuples.src_ip[IPV4_INDEX] =
-                               be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
-               rule->tuples_mask.src_ip[IPV4_INDEX] =
-                               be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
+       rule->tuples.dst_ip[IPV4_INDEX] =
+                       be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
+       rule->tuples_mask.dst_ip[IPV4_INDEX] =
+                       be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
 
-               rule->tuples.dst_ip[IPV4_INDEX] =
-                               be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
-               rule->tuples_mask.dst_ip[IPV4_INDEX] =
-                               be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
+       rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
+       rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
 
-               rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
-               rule->tuples_mask.src_port =
-                               be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
+       rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
+       rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
 
-               rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
-               rule->tuples_mask.dst_port =
-                               be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
+       rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
+       rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
 
-               rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
-               rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
+       rule->tuples.ether_proto = ETH_P_IP;
+       rule->tuples_mask.ether_proto = 0xFFFF;
 
-               rule->tuples.ether_proto = ETH_P_IP;
-               rule->tuples_mask.ether_proto = 0xFFFF;
+       rule->tuples.ip_proto = ip_proto;
+       rule->tuples_mask.ip_proto = 0xFF;
+}
 
-               break;
-       case IP_USER_FLOW:
-               rule->tuples.src_ip[IPV4_INDEX] =
-                               be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
-               rule->tuples_mask.src_ip[IPV4_INDEX] =
-                               be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
+static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
+                                  struct ethtool_rx_flow_spec *fs,
+                                  struct hclge_fd_rule *rule)
+{
+       rule->tuples.src_ip[IPV4_INDEX] =
+                       be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
+       rule->tuples_mask.src_ip[IPV4_INDEX] =
+                       be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
 
-               rule->tuples.dst_ip[IPV4_INDEX] =
-                               be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
-               rule->tuples_mask.dst_ip[IPV4_INDEX] =
-                               be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
+       rule->tuples.dst_ip[IPV4_INDEX] =
+                       be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
+       rule->tuples_mask.dst_ip[IPV4_INDEX] =
+                       be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
 
-               rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
-               rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
+       rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
+       rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
 
-               rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
-               rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
+       rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
+       rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
 
-               rule->tuples.ether_proto = ETH_P_IP;
-               rule->tuples_mask.ether_proto = 0xFFFF;
+       rule->tuples.ether_proto = ETH_P_IP;
+       rule->tuples_mask.ether_proto = 0xFFFF;
+}
 
-               break;
-       case SCTP_V6_FLOW:
-       case TCP_V6_FLOW:
-       case UDP_V6_FLOW:
-               be32_to_cpu_array(rule->tuples.src_ip,
-                                 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
-               be32_to_cpu_array(rule->tuples_mask.src_ip,
-                                 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
+static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
+                                     struct ethtool_rx_flow_spec *fs,
+                                     struct hclge_fd_rule *rule, u8 ip_proto)
+{
+       be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
+                         IPV6_SIZE);
+       be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
+                         IPV6_SIZE);
 
-               be32_to_cpu_array(rule->tuples.dst_ip,
-                                 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
-               be32_to_cpu_array(rule->tuples_mask.dst_ip,
-                                 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
+       be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
+                         IPV6_SIZE);
+       be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
+                         IPV6_SIZE);
 
-               rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
-               rule->tuples_mask.src_port =
-                               be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
+       rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
+       rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
 
-               rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
-               rule->tuples_mask.dst_port =
-                               be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
+       rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
+       rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
 
-               rule->tuples.ether_proto = ETH_P_IPV6;
-               rule->tuples_mask.ether_proto = 0xFFFF;
+       rule->tuples.ether_proto = ETH_P_IPV6;
+       rule->tuples_mask.ether_proto = 0xFFFF;
 
-               break;
-       case IPV6_USER_FLOW:
-               be32_to_cpu_array(rule->tuples.src_ip,
-                                 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
-               be32_to_cpu_array(rule->tuples_mask.src_ip,
-                                 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
+       rule->tuples.ip_proto = ip_proto;
+       rule->tuples_mask.ip_proto = 0xFF;
+}
 
-               be32_to_cpu_array(rule->tuples.dst_ip,
-                                 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
-               be32_to_cpu_array(rule->tuples_mask.dst_ip,
-                                 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
+static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
+                                  struct ethtool_rx_flow_spec *fs,
+                                  struct hclge_fd_rule *rule)
+{
+       be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
+                         IPV6_SIZE);
+       be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
+                         IPV6_SIZE);
 
-               rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
-               rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
+       be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
+                         IPV6_SIZE);
+       be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
+                         IPV6_SIZE);
 
-               rule->tuples.ether_proto = ETH_P_IPV6;
-               rule->tuples_mask.ether_proto = 0xFFFF;
+       rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
+       rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
 
-               break;
-       case ETHER_FLOW:
-               ether_addr_copy(rule->tuples.src_mac,
-                               fs->h_u.ether_spec.h_source);
-               ether_addr_copy(rule->tuples_mask.src_mac,
-                               fs->m_u.ether_spec.h_source);
+       rule->tuples.ether_proto = ETH_P_IPV6;
+       rule->tuples_mask.ether_proto = 0xFFFF;
+}
 
-               ether_addr_copy(rule->tuples.dst_mac,
-                               fs->h_u.ether_spec.h_dest);
-               ether_addr_copy(rule->tuples_mask.dst_mac,
-                               fs->m_u.ether_spec.h_dest);
+static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
+                                    struct ethtool_rx_flow_spec *fs,
+                                    struct hclge_fd_rule *rule)
+{
+       ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
+       ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
 
-               rule->tuples.ether_proto =
-                               be16_to_cpu(fs->h_u.ether_spec.h_proto);
-               rule->tuples_mask.ether_proto =
-                               be16_to_cpu(fs->m_u.ether_spec.h_proto);
+       ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
+       ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
 
-               break;
-       default:
-               return -EOPNOTSUPP;
-       }
+       rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
+       rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
+}
+
+static int hclge_fd_get_tuple(struct hclge_dev *hdev,
+                             struct ethtool_rx_flow_spec *fs,
+                             struct hclge_fd_rule *rule)
+{
+       u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
 
        switch (flow_type) {
        case SCTP_V4_FLOW:
-       case SCTP_V6_FLOW:
-               rule->tuples.ip_proto = IPPROTO_SCTP;
-               rule->tuples_mask.ip_proto = 0xFF;
+               hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
                break;
        case TCP_V4_FLOW:
-       case TCP_V6_FLOW:
-               rule->tuples.ip_proto = IPPROTO_TCP;
-               rule->tuples_mask.ip_proto = 0xFF;
+               hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
                break;
        case UDP_V4_FLOW:
+               hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
+               break;
+       case IP_USER_FLOW:
+               hclge_fd_get_ip4_tuple(hdev, fs, rule);
+               break;
+       case SCTP_V6_FLOW:
+               hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
+               break;
+       case TCP_V6_FLOW:
+               hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
+               break;
        case UDP_V6_FLOW:
-               rule->tuples.ip_proto = IPPROTO_UDP;
-               rule->tuples_mask.ip_proto = 0xFF;
+               hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
                break;
-       default:
+       case IPV6_USER_FLOW:
+               hclge_fd_get_ip6_tuple(hdev, fs, rule);
                break;
+       case ETHER_FLOW:
+               hclge_fd_get_ether_tuple(hdev, fs, rule);
+               break;
+       default:
+               return -EOPNOTSUPP;
        }
 
        if (fs->flow_type & FLOW_EXT) {
@@ -5980,6 +6121,42 @@ static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
        return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
 }
 
+/* hclge_fd_parse_ring_cookie - decode an ethtool ring_cookie into a
+ * flow-director forwarding action.
+ * @hdev: device private structure
+ * @ring_cookie: ethtool cookie; RX_CLS_FLOW_DISC requests a drop, otherwise
+ *              it encodes a VF id and a ring index
+ * @vport_id: output, destination vport id when selecting a queue
+ * @action: output, HCLGE_FD_ACTION_DROP_PACKET or HCLGE_FD_ACTION_SELECT_QUEUE
+ * @queue_id: output, destination ring when selecting a queue
+ *
+ * Return: 0 on success, -EINVAL when the VF id or ring index is out of range.
+ */
+static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
+                                     u16 *vport_id, u8 *action, u16 *queue_id)
+{
+       struct hclge_vport *vport = hdev->vport;
+
+       if (ring_cookie == RX_CLS_FLOW_DISC) {
+               *action = HCLGE_FD_ACTION_DROP_PACKET;
+       } else {
+               u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
+               u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
+               u16 tqps;
+
+               if (vf > hdev->num_req_vfs) {
+                       dev_err(&hdev->pdev->dev,
+                               "Error: vf id (%u) > max vf num (%u)\n",
+                               vf, hdev->num_req_vfs);
+                       return -EINVAL;
+               }
+
+               /* vf == 0 targets the PF itself (hdev->vport[0]) */
+               *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
+               /* bound-check the ring against the target vport's tqp count */
+               tqps = hdev->vport[vf].nic.kinfo.num_tqps;
+
+               if (ring >= tqps) {
+                       dev_err(&hdev->pdev->dev,
+                               "Error: queue id (%u) > max tqp num (%u)\n",
+                               ring, tqps - 1);
+                       return -EINVAL;
+               }
+
+               *action = HCLGE_FD_ACTION_SELECT_QUEUE;
+               *queue_id = ring;
+       }
+
+       return 0;
+}
+
 static int hclge_add_fd_entry(struct hnae3_handle *handle,
                              struct ethtool_rxnfc *cmd)
 {
@@ -6016,33 +6193,10 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
        if (ret)
                return ret;
 
-       if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
-               action = HCLGE_FD_ACTION_DROP_PACKET;
-       } else {
-               u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
-               u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
-               u16 tqps;
-
-               if (vf > hdev->num_req_vfs) {
-                       dev_err(&hdev->pdev->dev,
-                               "Error: vf id (%u) > max vf num (%u)\n",
-                               vf, hdev->num_req_vfs);
-                       return -EINVAL;
-               }
-
-               dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
-               tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
-
-               if (ring >= tqps) {
-                       dev_err(&hdev->pdev->dev,
-                               "Error: queue id (%u) > max tqp num (%u)\n",
-                               ring, tqps - 1);
-                       return -EINVAL;
-               }
-
-               action = HCLGE_FD_ACTION_SELECT_QUEUE;
-               q_index = ring;
-       }
+       ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
+                                        &action, &q_index);
+       if (ret)
+               return ret;
 
        rule = kzalloc(sizeof(*rule), GFP_KERNEL);
        if (!rule)
@@ -6330,8 +6484,7 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
                fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
                fs->m_ext.vlan_tci =
                                rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
-                               cpu_to_be16(VLAN_VID_MASK) :
-                               cpu_to_be16(rule->tuples_mask.vlan_tag1);
+                               0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
        }
 
        if (fs->flow_type & FLOW_MAC_EXT) {
@@ -7126,19 +7279,19 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
        return ret;
 }
 
-static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
+static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
                                     enum hnae3_loop loop_mode)
 {
-#define HCLGE_SERDES_RETRY_MS  10
-#define HCLGE_SERDES_RETRY_NUM 100
+#define HCLGE_COMMON_LB_RETRY_MS       10
+#define HCLGE_COMMON_LB_RETRY_NUM      100
 
-       struct hclge_serdes_lb_cmd *req;
+       struct hclge_common_lb_cmd *req;
        struct hclge_desc desc;
        int ret, i = 0;
        u8 loop_mode_b;
 
-       req = (struct hclge_serdes_lb_cmd *)desc.data;
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
+       req = (struct hclge_common_lb_cmd *)desc.data;
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
 
        switch (loop_mode) {
        case HNAE3_LOOP_SERIAL_SERDES:
@@ -7147,9 +7300,12 @@ static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
        case HNAE3_LOOP_PARALLEL_SERDES:
                loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
                break;
+       case HNAE3_LOOP_PHY:
+               loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
+               break;
        default:
                dev_err(&hdev->pdev->dev,
-                       "unsupported serdes loopback mode %d\n", loop_mode);
+                       "unsupported common loopback mode %d\n", loop_mode);
                return -ENOTSUPP;
        }
 
@@ -7163,39 +7319,39 @@ static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
-                       "serdes loopback set fail, ret = %d\n", ret);
+                       "common loopback set fail, ret = %d\n", ret);
                return ret;
        }
 
        do {
-               msleep(HCLGE_SERDES_RETRY_MS);
-               hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
+               msleep(HCLGE_COMMON_LB_RETRY_MS);
+               hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
                                           true);
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
-                               "serdes loopback get, ret = %d\n", ret);
+                               "common loopback get, ret = %d\n", ret);
                        return ret;
                }
-       } while (++i < HCLGE_SERDES_RETRY_NUM &&
-                !(req->result & HCLGE_CMD_SERDES_DONE_B));
+       } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
+                !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
 
-       if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
-               dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
+       if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
+               dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
                return -EBUSY;
-       } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
-               dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
+       } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
+               dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
                return -EIO;
        }
        return ret;
 }
 
-static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
+static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
                                     enum hnae3_loop loop_mode)
 {
        int ret;
 
-       ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
+       ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
        if (ret)
                return ret;
 
@@ -7244,8 +7400,12 @@ static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
        struct phy_device *phydev = hdev->hw.mac.phydev;
        int ret;
 
-       if (!phydev)
+       if (!phydev) {
+               if (hnae3_dev_phy_imp_supported(hdev))
+                       return hclge_set_common_loopback(hdev, en,
+                                                        HNAE3_LOOP_PHY);
                return -ENOTSUPP;
+       }
 
        if (en)
                ret = hclge_enable_phy_loopback(hdev, phydev);
@@ -7316,7 +7476,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
                break;
        case HNAE3_LOOP_SERIAL_SERDES:
        case HNAE3_LOOP_PARALLEL_SERDES:
-               ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
+               ret = hclge_set_common_loopback(hdev, en, loop_mode);
                break;
        case HNAE3_LOOP_PHY:
                ret = hclge_set_phy_loopback(hdev, en);
@@ -7349,11 +7509,11 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev)
        if (ret)
                return ret;
 
-       ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
+       ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
        if (ret)
                return ret;
 
-       return hclge_cfg_serdes_loopback(hdev, false,
+       return hclge_cfg_common_loopback(hdev, false,
                                         HNAE3_LOOP_PARALLEL_SERDES);
 }
 
@@ -8760,6 +8920,29 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
        return 0;
 }
 
+/* hclge_mii_ioctl - service MII ioctls via firmware PHY register access
+ * @hdev: device private structure
+ * @ifr: ioctl request carrying the embedded struct mii_ioctl_data
+ * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
+ *
+ * Used when there is no kernel phydev; only supported when the device
+ * reports PHY-in-IMP capability (hnae3_dev_phy_imp_supported), otherwise
+ * returns -EOPNOTSUPP.
+ */
+static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
+{
+       struct mii_ioctl_data *data = if_mii(ifr);
+
+       if (!hnae3_dev_phy_imp_supported(hdev))
+               return -EOPNOTSUPP;
+
+       switch (cmd) {
+       case SIOCGMIIPHY:
+               data->phy_id = hdev->hw.mac.phy_addr;
+               /* this command reads phy id and register at the same time */
+               fallthrough;
+       case SIOCGMIIREG:
+               data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
+               return 0;
+
+       case SIOCSMIIREG:
+               return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
                          int cmd)
 {
@@ -8767,7 +8950,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
        struct hclge_dev *hdev = vport->back;
 
        if (!hdev->hw.mac.phydev)
-               return -EOPNOTSUPP;
+               return hclge_mii_ioctl(hdev, ifr, cmd);
 
        return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
 }
@@ -10014,9 +10197,10 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
-       struct phy_device *phydev = hdev->hw.mac.phydev;
+       u8 media_type = hdev->hw.mac.media_type;
 
-       *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
+       *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
+                   hclge_get_autoneg(handle) : 0;
 
        if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
                *rx_en = 0;
@@ -10062,7 +10246,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
        struct phy_device *phydev = hdev->hw.mac.phydev;
        u32 fc_autoneg;
 
-       if (phydev) {
+       if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
                fc_autoneg = hclge_get_autoneg(handle);
                if (auto_neg != fc_autoneg) {
                        dev_info(&hdev->pdev->dev,
@@ -10081,7 +10265,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
 
        hclge_record_user_pauseparam(hdev, rx_en, tx_en);
 
-       if (!auto_neg)
+       if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
                return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
 
        if (phydev)
@@ -10639,7 +10823,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        if (ret)
                goto err_msi_irq_uninit;
 
-       if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+       if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
+           !hnae3_dev_phy_imp_supported(hdev)) {
                ret = hclge_mac_mdio_config(hdev);
                if (ret)
                        goto err_msi_irq_uninit;
@@ -11032,6 +11217,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
                return ret;
        }
 
+       ret = hclge_tp_port_init(hdev);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
+                       ret);
+               return ret;
+       }
+
        ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
        if (ret) {
                dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
@@ -11973,6 +12165,8 @@ static const struct hnae3_ae_ops hclge_ops = {
        .add_cls_flower = hclge_add_cls_flower,
        .del_cls_flower = hclge_del_cls_flower,
        .cls_flower_active = hclge_is_cls_flower_active,
+       .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
+       .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
 };
 
 static struct hnae3_ae_algo ae_algo = {