// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */
#include <linux/etherdevice.h>
#include <linux/pci.h>

#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"
13 #define GVE_MAX_ADMINQ_RELEASE_CHECK 500
14 #define GVE_ADMINQ_SLEEP_LEN 20
15 #define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100
17 int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
19 priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
20 &priv->adminq_bus_addr, GFP_KERNEL);
21 if (unlikely(!priv->adminq))
24 priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
25 priv->adminq_prod_cnt = 0;
26 priv->adminq_cmd_fail = 0;
27 priv->adminq_timeouts = 0;
28 priv->adminq_describe_device_cnt = 0;
29 priv->adminq_cfg_device_resources_cnt = 0;
30 priv->adminq_register_page_list_cnt = 0;
31 priv->adminq_unregister_page_list_cnt = 0;
32 priv->adminq_create_tx_queue_cnt = 0;
33 priv->adminq_create_rx_queue_cnt = 0;
34 priv->adminq_destroy_tx_queue_cnt = 0;
35 priv->adminq_destroy_rx_queue_cnt = 0;
36 priv->adminq_dcfg_device_resources_cnt = 0;
37 priv->adminq_set_driver_parameter_cnt = 0;
39 /* Setup Admin queue with the device */
40 iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
41 &priv->reg_bar0->adminq_pfn);
43 gve_set_admin_queue_ok(priv);
47 void gve_adminq_release(struct gve_priv *priv)
51 /* Tell the device the adminq is leaving */
52 iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
53 while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
54 /* If this is reached the device is unrecoverable and still
55 * holding memory. Continue looping to avoid memory corruption,
56 * but WARN so it is visible what is going on.
58 if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
59 WARN(1, "Unrecoverable platform error!");
61 msleep(GVE_ADMINQ_SLEEP_LEN);
63 gve_clear_device_rings_ok(priv);
64 gve_clear_device_resources_ok(priv);
65 gve_clear_admin_queue_ok(priv);
68 void gve_adminq_free(struct device *dev, struct gve_priv *priv)
70 if (!gve_get_admin_queue_ok(priv))
72 gve_adminq_release(priv);
73 dma_free_coherent(dev, PAGE_SIZE, priv->adminq, priv->adminq_bus_addr);
74 gve_clear_admin_queue_ok(priv);
77 static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
79 iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
82 static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
86 for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
87 if (ioread32be(&priv->reg_bar0->adminq_event_counter)
90 msleep(GVE_ADMINQ_SLEEP_LEN);
96 static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
98 if (status != GVE_ADMINQ_COMMAND_PASSED &&
99 status != GVE_ADMINQ_COMMAND_UNSET) {
100 dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
101 priv->adminq_cmd_fail++;
104 case GVE_ADMINQ_COMMAND_PASSED:
106 case GVE_ADMINQ_COMMAND_UNSET:
107 dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
109 case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
110 case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
111 case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
112 case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
113 case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
115 case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
116 case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
117 case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
118 case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
119 case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
120 case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
122 case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
124 case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
125 case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
127 case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
129 case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
132 dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
137 /* This function is not threadsafe - the caller is responsible for any
140 int gve_adminq_execute_cmd(struct gve_priv *priv,
141 union gve_adminq_command *cmd_orig)
143 union gve_adminq_command *cmd;
148 cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
149 priv->adminq_prod_cnt++;
150 prod_cnt = priv->adminq_prod_cnt;
152 memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
153 opcode = be32_to_cpu(READ_ONCE(cmd->opcode));
156 case GVE_ADMINQ_DESCRIBE_DEVICE:
157 priv->adminq_describe_device_cnt++;
159 case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
160 priv->adminq_cfg_device_resources_cnt++;
162 case GVE_ADMINQ_REGISTER_PAGE_LIST:
163 priv->adminq_register_page_list_cnt++;
165 case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
166 priv->adminq_unregister_page_list_cnt++;
168 case GVE_ADMINQ_CREATE_TX_QUEUE:
169 priv->adminq_create_tx_queue_cnt++;
171 case GVE_ADMINQ_CREATE_RX_QUEUE:
172 priv->adminq_create_rx_queue_cnt++;
174 case GVE_ADMINQ_DESTROY_TX_QUEUE:
175 priv->adminq_destroy_tx_queue_cnt++;
177 case GVE_ADMINQ_DESTROY_RX_QUEUE:
178 priv->adminq_destroy_rx_queue_cnt++;
180 case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
181 priv->adminq_dcfg_device_resources_cnt++;
183 case GVE_ADMINQ_SET_DRIVER_PARAMETER:
184 priv->adminq_set_driver_parameter_cnt++;
187 dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
190 gve_adminq_kick_cmd(priv, prod_cnt);
191 if (!gve_adminq_wait_for_cmd(priv, prod_cnt)) {
192 dev_err(&priv->pdev->dev, "AQ command timed out, need to reset AQ\n");
193 priv->adminq_timeouts++;
194 return -ENOTRECOVERABLE;
197 memcpy(cmd_orig, cmd, sizeof(*cmd));
198 status = be32_to_cpu(READ_ONCE(cmd->status));
199 return gve_adminq_parse_err(priv, status);
/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
 * the management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
209 #define GVE_NTFY_BLK_BASE_MSIX_IDX 0
210 int gve_adminq_configure_device_resources(struct gve_priv *priv,
211 dma_addr_t counter_array_bus_addr,
213 dma_addr_t db_array_bus_addr,
216 union gve_adminq_command cmd;
218 memset(&cmd, 0, sizeof(cmd));
219 cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
220 cmd.configure_device_resources =
221 (struct gve_adminq_configure_device_resources) {
222 .counter_array = cpu_to_be64(counter_array_bus_addr),
223 .num_counters = cpu_to_be32(num_counters),
224 .irq_db_addr = cpu_to_be64(db_array_bus_addr),
225 .num_irq_dbs = cpu_to_be32(num_ntfy_blks),
226 .irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
227 .ntfy_blk_msix_base_idx =
228 cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
231 return gve_adminq_execute_cmd(priv, &cmd);
234 int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
236 union gve_adminq_command cmd;
238 memset(&cmd, 0, sizeof(cmd));
239 cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);
241 return gve_adminq_execute_cmd(priv, &cmd);
244 int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
246 struct gve_tx_ring *tx = &priv->tx[queue_index];
247 union gve_adminq_command cmd;
249 memset(&cmd, 0, sizeof(cmd));
250 cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
251 cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
252 .queue_id = cpu_to_be32(queue_index),
254 .queue_resources_addr = cpu_to_be64(tx->q_resources_bus),
255 .tx_ring_addr = cpu_to_be64(tx->bus),
256 .queue_page_list_id = cpu_to_be32(tx->tx_fifo.qpl->id),
257 .ntfy_id = cpu_to_be32(tx->ntfy_id),
260 return gve_adminq_execute_cmd(priv, &cmd);
263 int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
265 struct gve_rx_ring *rx = &priv->rx[queue_index];
266 union gve_adminq_command cmd;
268 memset(&cmd, 0, sizeof(cmd));
269 cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
270 cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
271 .queue_id = cpu_to_be32(queue_index),
272 .index = cpu_to_be32(queue_index),
274 .ntfy_id = cpu_to_be32(rx->ntfy_id),
275 .queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
276 .rx_desc_ring_addr = cpu_to_be64(rx->desc.bus),
277 .rx_data_ring_addr = cpu_to_be64(rx->data.data_bus),
278 .queue_page_list_id = cpu_to_be32(rx->data.qpl->id),
281 return gve_adminq_execute_cmd(priv, &cmd);
284 int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
286 union gve_adminq_command cmd;
288 memset(&cmd, 0, sizeof(cmd));
289 cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
290 cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
291 .queue_id = cpu_to_be32(queue_index),
294 return gve_adminq_execute_cmd(priv, &cmd);
297 int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
299 union gve_adminq_command cmd;
301 memset(&cmd, 0, sizeof(cmd));
302 cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
303 cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
304 .queue_id = cpu_to_be32(queue_index),
307 return gve_adminq_execute_cmd(priv, &cmd);
310 int gve_adminq_describe_device(struct gve_priv *priv)
312 struct gve_device_descriptor *descriptor;
313 union gve_adminq_command cmd;
314 dma_addr_t descriptor_bus;
319 memset(&cmd, 0, sizeof(cmd));
320 descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
321 &descriptor_bus, GFP_KERNEL);
324 cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
325 cmd.describe_device.device_descriptor_addr =
326 cpu_to_be64(descriptor_bus);
327 cmd.describe_device.device_descriptor_version =
328 cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
329 cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);
331 err = gve_adminq_execute_cmd(priv, &cmd);
333 goto free_device_descriptor;
335 priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
336 if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
337 netif_err(priv, drv, priv->dev, "Tx desc count %d too low\n",
340 goto free_device_descriptor;
342 priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
343 if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
345 priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
347 netif_err(priv, drv, priv->dev, "Rx desc count %d too low\n",
350 goto free_device_descriptor;
352 priv->max_registered_pages =
353 be64_to_cpu(descriptor->max_registered_pages);
354 mtu = be16_to_cpu(descriptor->mtu);
355 if (mtu < ETH_MIN_MTU) {
356 netif_err(priv, drv, priv->dev, "MTU %d below minimum MTU\n",
359 goto free_device_descriptor;
361 priv->dev->max_mtu = mtu;
362 priv->num_event_counters = be16_to_cpu(descriptor->counters);
363 ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
364 mac = descriptor->mac;
365 netif_info(priv, drv, priv->dev, "MAC addr: %pM\n", mac);
366 priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
367 priv->rx_pages_per_qpl = be16_to_cpu(descriptor->rx_pages_per_qpl);
368 if (priv->rx_pages_per_qpl < priv->rx_desc_cnt) {
369 netif_err(priv, drv, priv->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
370 priv->rx_pages_per_qpl);
371 priv->rx_desc_cnt = priv->rx_pages_per_qpl;
373 priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
375 free_device_descriptor:
376 dma_free_coherent(&priv->pdev->dev, sizeof(*descriptor), descriptor,
381 int gve_adminq_register_page_list(struct gve_priv *priv,
382 struct gve_queue_page_list *qpl)
384 struct device *hdev = &priv->pdev->dev;
385 u32 num_entries = qpl->num_entries;
386 u32 size = num_entries * sizeof(qpl->page_buses[0]);
387 union gve_adminq_command cmd;
388 dma_addr_t page_list_bus;
393 memset(&cmd, 0, sizeof(cmd));
394 page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
398 for (i = 0; i < num_entries; i++)
399 page_list[i] = cpu_to_be64(qpl->page_buses[i]);
401 cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
402 cmd.reg_page_list = (struct gve_adminq_register_page_list) {
403 .page_list_id = cpu_to_be32(qpl->id),
404 .num_pages = cpu_to_be32(num_entries),
405 .page_address_list_addr = cpu_to_be64(page_list_bus),
408 err = gve_adminq_execute_cmd(priv, &cmd);
409 dma_free_coherent(hdev, size, page_list, page_list_bus);
413 int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
415 union gve_adminq_command cmd;
417 memset(&cmd, 0, sizeof(cmd));
418 cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
419 cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
420 .page_list_id = cpu_to_be32(page_list_id),
423 return gve_adminq_execute_cmd(priv, &cmd);
426 int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
428 union gve_adminq_command cmd;
430 memset(&cmd, 0, sizeof(cmd));
431 cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
432 cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
433 .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
434 .parameter_value = cpu_to_be64(mtu),
437 return gve_adminq_execute_cmd(priv, &cmd);