// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hnae3.h"

#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)
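
/* Return the number of free descriptors in the ring; one slot is always
 * kept unused so that next_to_use == next_to_clean only when the ring is
 * empty.
 */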
static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used;

	used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}
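
/* Check that the head pointer reported by hardware lies within the span of
 * descriptors currently owned by hardware, taking ring wrap-around into
 * account.
 */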
static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
					   int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}
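
/* Reclaim the CSQ descriptors that firmware has already processed and move
 * next_to_clean up to the hardware head pointer. Returns the number of
 * descriptors cleaned, or -EIO if the reported head is invalid.
 */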
static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
	struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
	int clean;
	u32 head;

	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}
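
/* A synchronous command has completed once the hardware head pointer has
 * caught up with the software next_to_use pointer.
 */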
static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
{
	u32 head;

	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}
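
/* Special opcodes report their return value in the first descriptor of a
 * multi-descriptor command instead of the last one.
 */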
static bool hclgevf_is_special_opcode(u16 opcode)
{
	const u16 spec_opcode[] = {0x30, 0x31, 0x32};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}
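
/* Program the base address, depth and head/tail pointers of a command queue
 * ring (CSQ or CRQ) into the corresponding hardware registers.
 */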
static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
{
	struct hclgevf_dev *hdev = ring->dev;
	struct hclgevf_hw *hw = &hdev->hw;
	u32 reg_val;

	if (ring->flag == HCLGEVF_TYPE_CSQ) {
		reg_val = lower_32_bits(ring->desc_dma_addr);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
		reg_val = upper_32_bits(ring->desc_dma_addr);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);

		reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGEVF_NIC_SW_RST_RDY;
		reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);

		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
	} else {
		reg_val = lower_32_bits(ring->desc_dma_addr);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
		reg_val = upper_32_bits(ring->desc_dma_addr);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);

		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);

		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
	}
}

static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
{
	hclgevf_cmd_config_regs(&hw->cmq.csq);
	hclgevf_cmd_config_regs(&hw->cmq.crq);
}
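
/* Allocate the DMA-coherent descriptor area used by a command queue ring. */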
static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclgevf_desc);

	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclgevf_desc);

	if (ring->desc) {
		dma_free_coherent(cmq_ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
{
	struct hclgevf_hw *hw = &hdev->hw;
	struct hclgevf_cmq_ring *ring =
		(ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->dev = hdev;
	ring->flag = ring_type;

	/* allocate CSQ/CRQ descriptor */
	ret = hclgevf_alloc_cmd_desc(ring);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
			(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");

	return ret;
}

void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
				  enum hclgevf_opcode_type opcode, bool is_read)
{
	memset(desc, 0, sizeof(struct hclgevf_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
				 HCLGEVF_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}
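
/* Maps an IMP (firmware) error code to a standard errno; the field names
 * follow their use in hclgevf_cmd_convert_err_code() below. The definition
 * is reconstructed here because the hclgevf_cmd_errcode[] initializer
 * depends on it.
 */
struct vf_errcode {
	u32 imp_errcode;
	int common_errno;
};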

static void hclgevf_cmd_copy_desc(struct hclgevf_hw *hw,
				  struct hclgevf_desc *desc, int num)
{
	struct hclgevf_desc *desc_to_use;
	int handle = 0;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}
}

static int hclgevf_cmd_convert_err_code(u16 desc_ret)
{
	struct vf_errcode hclgevf_cmd_errcode[] = {
		{HCLGEVF_CMD_EXEC_SUCCESS, 0},
		{HCLGEVF_CMD_NO_AUTH, -EPERM},
		{HCLGEVF_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
		{HCLGEVF_CMD_QUEUE_FULL, -EXFULL},
		{HCLGEVF_CMD_NEXT_ERR, -ENOSR},
		{HCLGEVF_CMD_UNEXE_ERR, -ENOTBLK},
		{HCLGEVF_CMD_PARA_ERR, -EINVAL},
		{HCLGEVF_CMD_RESULT_ERR, -ERANGE},
		{HCLGEVF_CMD_TIMEOUT, -ETIME},
		{HCLGEVF_CMD_HILINK_ERR, -ENOLINK},
		{HCLGEVF_CMD_QUEUE_ILLEGAL, -ENXIO},
		{HCLGEVF_CMD_INVALID, -EBADR},
	};
	u32 errcode_count = ARRAY_SIZE(hclgevf_cmd_errcode);
	u32 i;

	for (i = 0; i < errcode_count; i++)
		if (hclgevf_cmd_errcode[i].imp_errcode == desc_ret)
			return hclgevf_cmd_errcode[i].common_errno;

	return -EIO;
}
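
/* Copy the hardware write-back descriptors from the CSQ ring back to the
 * caller's buffer and convert the firmware return code (taken from the last
 * descriptor, or from the first one for special opcodes) into an errno.
 */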
static int hclgevf_cmd_check_retval(struct hclgevf_hw *hw,
				    struct hclgevf_desc *desc, int num, int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* Get the result of the hardware write back */
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc == hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclgevf_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);
	hw->cmq.last_status = desc_ret;

	return hclgevf_cmd_convert_err_code(desc_ret);
}
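
/* Wait for a synchronous command to complete (bounded by cmq.tx_timeout),
 * collect the firmware return value and reclaim the CSQ descriptors used by
 * this command.
 */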
static int hclgevf_cmd_check_result(struct hclgevf_hw *hw,
				    struct hclgevf_desc *desc, int num, int ntc)
{
	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
	bool is_completed = false;
	u32 timeout = 0;
	int handle;
	int ret;

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check.
	 */
	if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclgevf_cmd_csq_done(hw)) {
				is_completed = true;
				break;
			}
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (!is_completed)
		ret = -EBADE;
	else
		ret = hclgevf_cmd_check_retval(hw, desc, num, ntc);

	/* Clean the command send queue */
	handle = hclgevf_cmd_csq_clean(hw);
	if (handle < 0)
		ret = handle;
	else if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);

	return ret;
}

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue: it posts the
 * descriptors to the CSQ, waits for the firmware to process them and then
 * cleans the queue.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
	int ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclgevf_ring_space(&hw->cmq.csq)) {
		/* If the CMDQ ring is full, the SW HEAD and HW HEAD may
		 * differ, so update the SW HEAD pointer csq->next_to_clean.
		 */
		csq->next_to_clean = hclgevf_read_dev(hw,
						      HCLGEVF_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time
	 * which will be used for hardware to write back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	hclgevf_cmd_copy_desc(hw, desc, num);

	/* Write to hardware */
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
			  hw->cmq.csq.next_to_use);

	ret = hclgevf_cmd_check_result(hw, desc, num, ntc);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return ret;
}

static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
}
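
/* Each entry maps a capability bit reported by firmware to the corresponding
 * HNAE3 device capability bit.
 */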
static const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = {
	{HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGEVF_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGEVF_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
};

static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
				     struct hclgevf_query_version_cmd *cmd)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	u32 caps, i;

	caps = __le32_to_cpu(cmd->caps[0]);
	for (i = 0; i < ARRAY_SIZE(hclgevf_cmd_caps_bit_map0); i++)
		if (hnae3_get_bit(caps, hclgevf_cmd_caps_bit_map0[i].imp_bit))
			set_bit(hclgevf_cmd_caps_bit_map0[i].local_bit,
				ae_dev->caps);
}

static __le32 hclgevf_build_api_caps(void)
{
	u32 api_caps = 0;

	hnae3_set_bit(api_caps, HCLGEVF_API_CAP_FLEX_RSS_TBL_B, 1);

	return cpu_to_le32(api_caps);
}
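
/* Query the firmware version and the device capabilities, record the results
 * in hdev and the hnae3_ae_dev, set the default capabilities for V2 and newer
 * devices, and parse the capability bits reported by firmware.
 */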
static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclgevf_query_version_cmd *resp;
	struct hclgevf_desc desc;
	int status;

	resp = (struct hclgevf_query_version_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
	resp->api_caps = hclgevf_build_api_caps();
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		return status;

	hdev->fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
				 HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= hdev->pdev->revision;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		hclgevf_set_default_capability(hdev);

	hclgevf_parse_capability(hdev, resp);

	return 0;
}
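
/* Allocate and set up both command queue rings (CSQ and CRQ) and their
 * descriptor memory.
 */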
int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
{
	int ret;

	/* Setup the lock for the command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
	hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;

	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);

	return ret;
}
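
/* Bring the command queue into an operational state: reset the ring
 * pointers, program the hardware registers, and query the firmware version
 * and device capabilities.
 */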
int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);

	/* initialize the pointers of the async rx queue of the mailbox */
	hdev->arq.hdev = hdev;
	hdev->arq.head = 0;
	hdev->arq.tail = 0;
	atomic_set(&hdev->arq.count, 0);
	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclgevf_cmd_init_regs(&hdev->hw);

	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	/* Check if there is a new reset pending, because a higher-level
	 * reset may happen while a lower-level reset is being processed.
	 */
	if (hclgevf_is_reset_pending(hdev)) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* get version and device capabilities */
	ret = hclgevf_cmd_query_version_and_capability(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n", ret);
		goto err_cmd_init;
	}

	dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	return 0;

err_cmd_init:
	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	return ret;
}

static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
{
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
}
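
/* Disable further commands, give the firmware time to finish any in-flight
 * commands, clear the command queue registers and free the descriptor
 * memory of both rings.
 */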
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* wait to ensure that the firmware completes the possible left
	 * over commands.
	 */
	msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME);
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	hclgevf_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
}