#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-#define HCLGE_BUF_SIZE_UNIT 256
+#define HCLGE_BUF_SIZE_UNIT 256U
#define HCLGE_BUF_MUL_BY 2
#define HCLGE_BUF_DIV_BY 2
+#define NEED_RESERVE_TC_NUM 2
+#define BUF_MAX_PERCENT 100
+#define BUF_RESERVE_PERCENT 90
#define HCLGE_RESET_MAX_FAIL_CNT 5
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
return buff;
}
-static u64 *hclge_comm_get_stats(void *comm_stats,
+static u64 *hclge_comm_get_stats(const void *comm_stats,
const struct hclge_comm_stats_str strs[],
int size, u64 *data)
{
return buff;
for (i = 0; i < size; i++) {
- snprintf(buff, ETH_GSTRING_LEN,
- strs[i].desc);
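+ /* pass desc through "%s" so a '%' in it is never parsed as a format */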
+ snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
buff = buff + ETH_GSTRING_LEN;
}
struct hclge_cfg_param_cmd *req;
u64 mac_addr_tmp_high;
u64 mac_addr_tmp;
- int i;
+ unsigned int i;
req = (struct hclge_cfg_param_cmd *)desc[0].data;
{
struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
struct hclge_cfg_param_cmd *req;
- int i, ret;
+ unsigned int i;
+ int ret;
for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
u32 offset = 0;
static int hclge_configure(struct hclge_dev *hdev)
{
struct hclge_cfg cfg;
- int ret, i;
+ unsigned int i;
+ int ret;
ret = hclge_get_cfg(hdev, &cfg);
if (ret) {
return ret;
}
-static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
- int tso_mss_max)
+static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
+ unsigned int tso_mss_max)
{
struct hclge_cfg_tso_status_cmd *req;
struct hclge_desc desc;
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
- int i, cnt = 0;
+ unsigned int i;
+ u32 cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
if (hdev->hw_tc_map & BIT(i))
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
- int i, cnt = 0;
+ unsigned int i;
+ int cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
- int i, cnt = 0;
+ unsigned int i;
+ int cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
}
if (hnae3_dev_dcb_supported(hdev)) {
+ hi_thrd = shared_buf - hdev->dv_buf_size;
+
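+ /* hold back 10% (BUF_MAX_PERCENT - BUF_RESERVE_PERCENT) of the
+ * headroom when no more than NEED_RESERVE_TC_NUM TCs share it, e.g.
+ * for two TCs: (shared_buf - dv_buf_size) * 90 / 100 before the
+ * division by tc_num below
+ */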
+ if (tc_num <= NEED_RESERVE_TC_NUM)
+ hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
+ / BUF_MAX_PERCENT;
+
if (tc_num)
- hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
- else
- hi_thrd = shared_buf - hdev->dv_buf_size;
+ hi_thrd = hi_thrd / tc_num;
hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
{
u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
- int i;
+ unsigned int i;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
/* let the last to be cleared first */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+ unsigned int mask = BIT((unsigned int)i);
- if (hdev->hw_tc_map & BIT(i) &&
- !(hdev->tm_info.hw_pfc_map & BIT(i))) {
+ if (hdev->hw_tc_map & mask &&
+ !(hdev->tm_info.hw_pfc_map & mask)) {
/* Clear the no pfc TC private buffer */
priv->wl.low = 0;
priv->wl.high = 0;
/* let the last to be cleared first */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+ unsigned int mask = BIT((unsigned int)i);
- if (hdev->hw_tc_map & BIT(i) &&
- hdev->tm_info.hw_pfc_map & BIT(i)) {
+ if (hdev->hw_tc_map & mask &&
+ hdev->tm_info.hw_pfc_map & mask) {
/* Reduce the number of pfc TC with private buffer */
priv->wl.low = 0;
priv->enable = 0;
return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
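+/* Hand out the whole rx buffer as per-TC private buffer with no shared
+ * buffer: split it evenly among the enabled TCs (holding back 10% when
+ * no more than NEED_RESERVE_TC_NUM TCs are enabled) and return false
+ * when the per-TC share would drop below the required minimum.
+ */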
+static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+#define COMPENSATE_BUFFER 0x3C00
+#define COMPENSATE_HALF_MPS_NUM 5
+#define PRIV_WL_GAP 0x1800
+
+ u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ u32 tc_num = hclge_get_tc_num(hdev);
+ u32 half_mps = hdev->mps >> 1;
+ u32 min_rx_priv;
+ unsigned int i;
+
+ if (tc_num)
+ rx_priv = rx_priv / tc_num;
+
+ if (tc_num <= NEED_RESERVE_TC_NUM)
+ rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
+
+ min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
+ COMPENSATE_HALF_MPS_NUM * half_mps;
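+ /* e.g. with mps = 1500: half_mps = 750, so min_rx_priv becomes
+ * dv_buf_size + 0x3C00 + 5 * 750 rounded up to a 256 byte unit
+ */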
+ min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
+ rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
+
+ if (rx_priv < min_rx_priv)
+ return false;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+ priv->enable = 0;
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ priv->enable = 1;
+ priv->buf_size = rx_priv;
+ priv->wl.high = rx_priv - hdev->dv_buf_size;
+ priv->wl.low = priv->wl.high - PRIV_WL_GAP;
+ }
+
+ buf_alloc->s_buf.buf_size = 0;
+
+ return true;
+}
+
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
* @hdev: pointer to struct hclge_dev
* @buf_alloc: pointer to buffer calculation data
return 0;
}
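+ /* try the all-private buffer layout first, then fall back to the
+ * shared buffer layouts below
+ */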
+ if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
+ return 0;
+
if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
return 0;
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
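+/* Disable (halt) or re-enable autoneg in hardware without touching the
+ * user's saved configuration; a no-op unless autoneg is both supported
+ * and currently enabled.
+ */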
+static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
+ return hclge_set_autoneg_en(hdev, !halt);
+
+ return 0;
+}
+
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
return ret;
}
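+ /* re-apply the saved autoneg setting to the freshly initialized MAC */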
+ if (hdev->hw.mac.support_autoneg) {
+ ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Config mac autoneg fail ret=%d\n", ret);
+ return ret;
+ }
+ }
+
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
- int mac_state;
+ unsigned int mac_state;
int link_stat;
if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
+ u32 clearval = 0;
u32 event_cause;
- u32 clearval;
hclge_enable_vector(&hdev->misc_vector, false);
event_cause = hclge_check_event_cause(hdev, &clearval);
dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
hclge_clear_reset_cause(hdev);
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
mod_timer(&hdev->reset_timer,
jiffies + HCLGE_RESET_INTERVAL);
struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
dev_info(&hdev->pdev->dev,
- "triggering global reset in reset timer\n");
- set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
+ "triggering reset in reset timer\n");
hclge_reset_event(hdev->pdev, NULL);
}
hclge_update_port_info(hdev);
hclge_update_link_status(hdev);
hclge_update_vport_alive(hdev);
+ hclge_sync_vlan_filter(hdev);
if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
hclge_rfs_filter_expire(hdev);
hdev->fd_arfs_expire_timer = 0;
const u8 hfunc, const u8 *key)
{
struct hclge_rss_config_cmd *req;
+ unsigned int key_offset = 0;
struct hclge_desc desc;
- int key_offset = 0;
int key_counts;
int key_size;
int ret;
u16 tc_valid[HCLGE_MAX_TC_NUM];
u16 tc_size[HCLGE_MAX_TC_NUM];
u16 roundup_size;
- int i, ret;
+ unsigned int i;
+ int ret;
ret = hclge_set_rss_indir_table(hdev, rss_indir);
if (ret)
{
u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
u8 cur_pos = 0, tuple_size, shift_bits;
- int i;
+ unsigned int i;
for (i = 0; i < MAX_META_DATA; i++) {
tuple_size = meta_data_key_info[i].key_length;
struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
u8 *cur_key_x, *cur_key_y;
- int i, ret, tuple_size;
+ unsigned int i;
+ int ret, tuple_size;
u8 meta_data_region;
memset(key_x, 0, sizeof(key_x));
return -EBUSY;
}
-static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
+static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable)
{
struct hclge_desc desc;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
req->stream_id = cpu_to_le16(stream_id);
- req->enable |= enable << HCLGE_TQP_ENABLE_B;
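+ /* set the enable bit explicitly instead of shifting the bool value */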
+ if (enable)
+ req->enable |= 1U << HCLGE_TQP_ENABLE_B;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
-static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
bool is_kill, u16 vlan, u8 qos,
__be16 proto)
{
if (!req0->resp_code)
return 0;
- if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
- dev_warn(&hdev->pdev->dev,
- "vlan %d filter is not in vf vlan table\n",
- vlan);
+ /* vf vlan filter is disabled once the vf vlan table is full, so a
+ * new vlan id is never added to the table. Just return 0 without a
+ * warning, to avoid flooding the log with verbose messages during
+ * unload.
+ */
+ if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
return 0;
- }
dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%d.\n",
bool writen_to_tbl = false;
int ret = 0;
- /* when port based VLAN enabled, we use port based VLAN as the VLAN
- * filter entry. In this case, we don't update VLAN filter table
- * when user add new VLAN or remove exist VLAN, just update the vport
- * VLAN list. The VLAN id in VLAN list won't be writen in VLAN filter
- * table until port based VLAN disabled
+ /* When the device is resetting, the firmware is unable to handle the
+ * mailbox. Just record the vlan id, and remove it after the reset
+ * has finished.
+ */
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ return -EBUSY;
+ }
+
+ /* When port based vlan is enabled, we use it as the vlan filter
+ * entry. In this case, we don't update the vlan filter table when the
+ * user adds a new vlan or removes an existing one, just update the
+ * vport vlan list. The vlan ids in that list are written to the vlan
+ * filter table only once port based vlan is disabled
*/
if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
writen_to_tbl = true;
}
- if (ret)
- return ret;
+ if (!ret) {
+ if (is_kill)
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ else
+ hclge_add_vport_vlan_table(vport, vlan_id,
+ writen_to_tbl);
+ } else if (is_kill) {
+ /* When removing the hw vlan filter failed, record the vlan id
+ * and try to remove it from hw later, to stay consistent with
+ * the stack
+ */
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ }
+ return ret;
+}
- if (is_kill)
- hclge_rm_vport_vlan_table(vport, vlan_id, false);
- else
- hclge_add_vport_vlan_table(vport, vlan_id,
- writen_to_tbl);
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+{
+#define HCLGE_MAX_SYNC_COUNT 60
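+ /* cap how many failed deletions are retried per call; leftovers are
+ * picked up by the next periodic service task run
+ */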
- return 0;
+ int i, ret, sync_cnt = 0;
+ u16 vlan_id;
+
+ /* retry for every vport, including the PF's own vport 0 */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ while (vlan_id != VLAN_N_VID) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id, vlan_id,
+ 0, true);
+ if (ret && ret != -EINVAL)
+ return;
+
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+
+ sync_cnt++;
+ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
+ return;
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ }
+ }
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct phy_device *phydev = hdev->hw.mac.phydev;
- int mdix_ctrl, mdix, retval, is_resolved;
+ int mdix_ctrl, mdix, is_resolved;
+ unsigned int retval;
if (!phydev) {
*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
+ int rst_cnt;
int ret;
+ rst_cnt = hdev->rst_stats.reset_cnt;
ret = client->ops->init_instance(&vport->nic);
if (ret)
return ret;
set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
- hnae3_set_client_init_flag(client, ae_dev, 1);
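+ /* a reset that started or completed while init_instance ran means the
+ * client was initialized against stale hw state; back out with -EBUSY
+ */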
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.reset_cnt) {
+ ret = -EBUSY;
+ goto init_nic_err;
+ }
/* Enable nic hw error interrupts */
ret = hclge_config_nic_hw_error(hdev, true);
- if (ret)
+ if (ret) {
dev_err(&ae_dev->pdev->dev,
"fail(%d) to enable hw error interrupts\n", ret);
+ goto init_nic_err;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
if (netif_msg_drv(&hdev->vport->nic))
hclge_info_show(hdev);
return ret;
+
+init_nic_err:
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
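+ /* wait for any in-flight reset to finish before unwinding the client */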
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ client->ops->uninit_instance(&vport->nic, 0);
+
+ return ret;
}
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->roce.client;
struct hclge_dev *hdev = ae_dev->priv;
+ int rst_cnt;
int ret;
if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
if (ret)
return ret;
+ rst_cnt = hdev->rst_stats.reset_cnt;
ret = client->ops->init_instance(&vport->roce);
if (ret)
return ret;
set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
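+ /* as in the nic path, bail out if a reset raced with init_instance */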
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.reset_cnt) {
+ ret = -EBUSY;
+ goto init_roce_err;
+ }
+
+ /* Enable roce ras interrupts */
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "fail(%d) to enable roce ras interrupts\n", ret);
+ goto init_roce_err;
+ }
+
hnae3_set_client_init_flag(client, ae_dev, 1);
return 0;
+
+init_roce_err:
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
+
+ return ret;
}
static int hclge_init_client_instance(struct hnae3_client *client,
}
}
- /* Enable roce ras interrupts */
- ret = hclge_config_rocee_ras_interrupt(hdev, true);
- if (ret)
- dev_err(&ae_dev->pdev->dev,
- "fail(%d) to enable roce ras interrupts\n", ret);
-
return ret;
clear_nic:
vport = &hdev->vport[i];
if (hdev->roce_client) {
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
hdev->roce_client->ops->uninit_instance(&vport->roce,
0);
hdev->roce_client = NULL;
return;
if (hdev->nic_client && client->ops->uninit_instance) {
clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
client->ops->uninit_instance(&vport->nic, 0);
hdev->nic_client = NULL;
vport->nic.client = NULL;
set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
+static void hclge_clear_resetting_state(struct hclge_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+ /* Send cmd to clear VF's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "clear vf(%d) rst failed %d!\n",
+ vport->vport_id, ret);
+ }
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
hclge_clear_all_event_cause(hdev);
+ hclge_clear_resetting_state(hdev);
+
+ /* Log and clear the hw errors that have already occurred */
+ hclge_handle_all_hns_hw_errors(ae_dev);
+
+ /* request a delayed reset for the error recovery, because an immediate
+ * global reset on one PF could disturb the pending initialization of
+ * other PFs
+ */
+ if (ae_dev->hw_err_reset_req) {
+ enum hnae3_reset_type reset_level;
+
+ reset_level = hclge_get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ hclge_set_def_reset_request(ae_dev, reset_level);
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
+ }
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
u16 tc_size[HCLGE_MAX_TC_NUM];
u16 roundup_size;
u32 *rss_indir;
- int ret, i;
+ unsigned int i;
+ int ret;
kinfo->req_rss_size = new_tqps_num;
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.restart_autoneg = hclge_restart_autoneg,
+ .halt_autoneg = hclge_halt_autoneg,
.get_pauseparam = hclge_get_pauseparam,
.set_pauseparam = hclge_set_pauseparam,
.set_mtu = hclge_set_mtu,