1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
4 #include <linux/module.h>
6 #include <linux/utsname.h>
7 #include <linux/version.h>
9 #include <net/mana/mana.h>
11 static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
13 return readl(g->bar0_va + offset);
16 static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
18 return readq(g->bar0_va + offset);
21 static void mana_gd_init_pf_regs(struct pci_dev *pdev)
23 struct gdma_context *gc = pci_get_drvdata(pdev);
24 void __iomem *sriov_base_va;
27 gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
28 gc->db_page_base = gc->bar0_va +
29 mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
31 sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
33 sriov_base_va = gc->bar0_va + sriov_base_off;
34 gc->shm_base = sriov_base_va +
35 mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
38 static void mana_gd_init_vf_regs(struct pci_dev *pdev)
40 struct gdma_context *gc = pci_get_drvdata(pdev);
42 gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;
44 gc->db_page_base = gc->bar0_va +
45 mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
47 gc->phys_db_page_base = gc->bar0_pa +
48 mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
50 gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
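/* Initialize the doorbell page and shared-memory register pointers, using the
 * PF or VF register layout as appropriate.
 */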
53 static void mana_gd_init_registers(struct pci_dev *pdev)
55 struct gdma_context *gc = pci_get_drvdata(pdev);
58 mana_gd_init_pf_regs(pdev);
60 mana_gd_init_vf_regs(pdev);
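/* Query the hardware resource limits and clamp the usable MSI-X vectors and
 * the maximum number of queues accordingly.
 */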
63 static int mana_gd_query_max_resources(struct pci_dev *pdev)
65 struct gdma_context *gc = pci_get_drvdata(pdev);
66 struct gdma_query_max_resources_resp resp = {};
67 struct gdma_general_req req = {};
70 mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
71 sizeof(req), sizeof(resp));
73 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
74 if (err || resp.hdr.status) {
75 dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n",
76 err, resp.hdr.status);
77 return err ? err : -EPROTO;
80 if (gc->num_msix_usable > resp.max_msix)
81 gc->num_msix_usable = resp.max_msix;
83 if (gc->num_msix_usable <= 1)
86 gc->max_num_queues = num_online_cpus();
87 if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
88 gc->max_num_queues = MANA_MAX_NUM_QUEUES;
90 if (gc->max_num_queues > resp.max_eq)
91 gc->max_num_queues = resp.max_eq;
93 if (gc->max_num_queues > resp.max_cq)
94 gc->max_num_queues = resp.max_cq;
96 if (gc->max_num_queues > resp.max_sq)
97 gc->max_num_queues = resp.max_sq;
99 if (gc->max_num_queues > resp.max_rq)
100 gc->max_num_queues = resp.max_rq;
102 /* The Hardware Channel (HWC) uses 1 MSI-X */
103 if (gc->max_num_queues > gc->num_msix_usable - 1)
104 gc->max_num_queues = gc->num_msix_usable - 1;
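/* List the devices exposed by the hardware and record the MANA device ID;
 * the HWC is detected separately when its channel is created.
 */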
109 static int mana_gd_detect_devices(struct pci_dev *pdev)
111 struct gdma_context *gc = pci_get_drvdata(pdev);
112 struct gdma_list_devices_resp resp = {};
113 struct gdma_general_req req = {};
114 struct gdma_dev_id dev;
119 mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
122 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
123 if (err || resp.hdr.status) {
124 dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err,
126 return err ? err : -EPROTO;
129 max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
131 for (i = 0; i < max_num_devs; i++) {
135 /* HWC is already detected in mana_hwc_create_channel(). */
136 if (dev_type == GDMA_DEVICE_HWC)
139 if (dev_type == GDMA_DEVICE_MANA) {
140 gc->mana.gdma_context = gc;
141 gc->mana.dev_id = dev;
145 return gc->mana.dev_id.type == 0 ? -ENODEV : 0;
148 int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
149 u32 resp_len, void *resp)
151 struct hw_channel_context *hwc = gc->hwc.driver_data;
153 return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
155 EXPORT_SYMBOL_NS(mana_gd_send_request, NET_MANA);
157 int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
158 struct gdma_mem_info *gmi)
160 dma_addr_t dma_handle;
163 if (length < PAGE_SIZE || !is_power_of_2(length))
167 buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL);
171 gmi->dma_handle = dma_handle;
172 gmi->virt_addr = buf;
173 gmi->length = length;
178 void mana_gd_free_memory(struct gdma_mem_info *gmi)
180 dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr,
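/* Issue a GDMA_CREATE_QUEUE request so the hardware creates an EQ backed by
 * the queue's DMA region.
 */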
184 static int mana_gd_create_hw_eq(struct gdma_context *gc,
185 struct gdma_queue *queue)
187 struct gdma_create_queue_resp resp = {};
188 struct gdma_create_queue_req req = {};
191 if (queue->type != GDMA_EQ)
194 mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
195 sizeof(req), sizeof(resp));
197 req.hdr.dev_id = queue->gdma_dev->dev_id;
198 req.type = queue->type;
199 req.pdid = queue->gdma_dev->pdid;
200 req.doolbell_id = queue->gdma_dev->doorbell;
201 req.gdma_region = queue->mem_info.gdma_region;
202 req.queue_size = queue->queue_size;
203 req.log2_throttle_limit = queue->eq.log2_throttle_limit;
204 req.eq_pci_msix_index = queue->eq.msix_index;
206 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
207 if (err || resp.hdr.status) {
208 dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err,
210 return err ? err : -EPROTO;
213 queue->id = resp.queue_index;
214 queue->eq.disable_needed = true;
215 queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
219 static int mana_gd_disable_queue(struct gdma_queue *queue)
221 struct gdma_context *gc = queue->gdma_dev->gdma_context;
222 struct gdma_disable_queue_req req = {};
223 struct gdma_general_resp resp = {};
226 WARN_ON(queue->type != GDMA_EQ);
228 mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
229 sizeof(req), sizeof(resp));
231 req.hdr.dev_id = queue->gdma_dev->dev_id;
232 req.type = queue->type;
233 req.queue_index = queue->id;
234 req.alloc_res_id_on_creation = 1;
236 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
237 if (err || resp.hdr.status) {
238 dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
240 return err ? err : -EPROTO;
246 #define DOORBELL_OFFSET_SQ 0x0
247 #define DOORBELL_OFFSET_RQ 0x400
248 #define DOORBELL_OFFSET_CQ 0x800
249 #define DOORBELL_OFFSET_EQ 0xFF8
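/* Build a doorbell entry for the given queue type and write it to the
 * matching offset within the client's doorbell page.
 */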
251 static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
252 enum gdma_queue_type q_type, u32 qid,
253 u32 tail_ptr, u8 num_req)
255 void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index;
256 union gdma_doorbell_entry e = {};
261 e.eq.tail_ptr = tail_ptr;
264 addr += DOORBELL_OFFSET_EQ;
269 e.cq.tail_ptr = tail_ptr;
272 addr += DOORBELL_OFFSET_CQ;
277 e.rq.tail_ptr = tail_ptr;
278 e.rq.wqe_cnt = num_req;
280 addr += DOORBELL_OFFSET_RQ;
285 e.sq.tail_ptr = tail_ptr;
287 addr += DOORBELL_OFFSET_SQ;
295 /* Ensure all writes are done before ringing the doorbell */
298 writeq(e.as_uint64, addr);
301 void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
303 mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
304 queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
307 void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
309 struct gdma_context *gc = cq->gdma_dev->gdma_context;
311 u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
313 u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);
315 mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
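/* Dispatch one EQE: completion events invoke the CQ callback, test events
 * complete the EQ test, and HWC events go to the EQ callback.
 */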
319 static void mana_gd_process_eqe(struct gdma_queue *eq)
321 u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
322 struct gdma_context *gc = eq->gdma_dev->gdma_context;
323 struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
324 union gdma_eqe_info eqe_info;
325 enum gdma_eqe_type type;
326 struct gdma_event event;
327 struct gdma_queue *cq;
328 struct gdma_eqe *eqe;
331 eqe = &eq_eqe_ptr[head];
332 eqe_info.as_uint32 = eqe->eqe_info;
333 type = eqe_info.type;
336 case GDMA_EQE_COMPLETION:
337 cq_id = eqe->details[0] & 0xFFFFFF;
338 if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs))
341 cq = gc->cq_table[cq_id];
342 if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id))
346 cq->cq.callback(cq->cq.context, cq);
350 case GDMA_EQE_TEST_EVENT:
351 gc->test_event_eq_id = eq->id;
352 complete(&gc->eq_test_event);
355 case GDMA_EQE_HWC_INIT_EQ_ID_DB:
356 case GDMA_EQE_HWC_INIT_DATA:
357 case GDMA_EQE_HWC_INIT_DONE:
358 if (!eq->eq.callback)
362 memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
363 eq->eq.callback(eq->eq.context, eq, &event);
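/* Process pending EQEs (up to five per call), check the owner bits for
 * overflow, and re-arm the EQ by ringing its doorbell.
 */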
371 static void mana_gd_process_eq_events(void *arg)
373 u32 owner_bits, new_bits, old_bits;
374 union gdma_eqe_info eqe_info;
375 struct gdma_eqe *eq_eqe_ptr;
376 struct gdma_queue *eq = arg;
377 struct gdma_context *gc;
378 struct gdma_eqe *eqe;
382 gc = eq->gdma_dev->gdma_context;
384 num_eqe = eq->queue_size / GDMA_EQE_SIZE;
385 eq_eqe_ptr = eq->queue_mem_ptr;
387 /* Process up to 5 EQEs at a time, and update the HW head. */
388 for (i = 0; i < 5; i++) {
389 eqe = &eq_eqe_ptr[eq->head % num_eqe];
390 eqe_info.as_uint32 = eqe->eqe_info;
391 owner_bits = eqe_info.owner_bits;
393 old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
394 /* No more entries */
395 if (owner_bits == old_bits)
398 new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
399 if (owner_bits != new_bits) {
400 dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id);
404 /* Per GDMA spec, rmb is necessary after checking owner_bits, before
409 mana_gd_process_eqe(eq);
414 head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
416 mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
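/* Reserve a free MSI-X entry from the bitmap for this EQ and attach the EQ
 * event processing handler to it.
 */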
420 static int mana_gd_register_irq(struct gdma_queue *queue,
421 const struct gdma_queue_spec *spec)
423 struct gdma_dev *gd = queue->gdma_dev;
424 struct gdma_irq_context *gic;
425 struct gdma_context *gc;
426 struct gdma_resource *r;
427 unsigned int msi_index;
432 gc = gd->gdma_context;
433 r = &gc->msix_resource;
436 spin_lock_irqsave(&r->lock, flags);
438 msi_index = find_first_zero_bit(r->map, r->size);
439 if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
442 bitmap_set(r->map, msi_index, 1);
443 queue->eq.msix_index = msi_index;
446 spin_unlock_irqrestore(&r->lock, flags);
449 dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u\n",
450 err, msi_index, r->size, gc->num_msix_usable);
455 gic = &gc->irq_contexts[msi_index];
457 WARN_ON(gic->handler || gic->arg);
461 gic->handler = mana_gd_process_eq_events;
466 static void mana_gd_deregister_irq(struct gdma_queue *queue)
468 struct gdma_dev *gd = queue->gdma_dev;
469 struct gdma_irq_context *gic;
470 struct gdma_context *gc;
471 struct gdma_resource *r;
472 unsigned int msix_index;
475 gc = gd->gdma_context;
476 r = &gc->msix_resource;
478 /* At most num_online_cpus() + 1 interrupts are used. */
479 msix_index = queue->eq.msix_index;
480 if (WARN_ON(msix_index >= gc->num_msix_usable))
483 gic = &gc->irq_contexts[msix_index];
487 spin_lock_irqsave(&r->lock, flags);
488 bitmap_clear(r->map, msix_index, 1);
489 spin_unlock_irqrestore(&r->lock, flags);
491 queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
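/* Verify that an EQ can deliver events by asking the hardware to generate a
 * test EQE and waiting for it to arrive.
 */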
494 int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
496 struct gdma_generate_test_event_req req = {};
497 struct gdma_general_resp resp = {};
498 struct device *dev = gc->dev;
501 mutex_lock(&gc->eq_test_event_mutex);
503 init_completion(&gc->eq_test_event);
504 gc->test_event_eq_id = INVALID_QUEUE_ID;
506 mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
507 sizeof(req), sizeof(resp));
509 req.hdr.dev_id = eq->gdma_dev->dev_id;
510 req.queue_index = eq->id;
512 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
514 dev_err(dev, "test_eq failed: %d\n", err);
520 if (resp.hdr.status) {
521 dev_err(dev, "test_eq failed: 0x%x\n", resp.hdr.status);
525 if (!wait_for_completion_timeout(&gc->eq_test_event, 30 * HZ)) {
526 dev_err(dev, "test_eq timed out on queue %d\n", eq->id);
530 if (eq->id != gc->test_event_eq_id) {
531 dev_err(dev, "test_eq got an event on wrong queue %d (%d)\n",
532 gc->test_event_eq_id, eq->id);
538 mutex_unlock(&gc->eq_test_event_mutex);
542 static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,
543 struct gdma_queue *queue)
548 err = mana_gd_test_eq(gc, queue);
550 dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
553 mana_gd_deregister_irq(queue);
555 if (queue->eq.disable_needed)
556 mana_gd_disable_queue(queue);
559 static int mana_gd_create_eq(struct gdma_dev *gd,
560 const struct gdma_queue_spec *spec,
561 bool create_hwq, struct gdma_queue *queue)
563 struct gdma_context *gc = gd->gdma_context;
564 struct device *dev = gc->dev;
565 u32 log2_num_entries;
568 queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
570 log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
572 if (spec->eq.log2_throttle_limit > log2_num_entries) {
573 dev_err(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n",
574 spec->eq.log2_throttle_limit, log2_num_entries);
578 err = mana_gd_register_irq(queue, spec);
580 dev_err(dev, "Failed to register irq: %d\n", err);
584 queue->eq.callback = spec->eq.callback;
585 queue->eq.context = spec->eq.context;
586 queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
587 queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;
590 err = mana_gd_create_hw_eq(gc, queue);
594 err = mana_gd_test_eq(gc, queue);
601 dev_err(dev, "Failed to create EQ: %d\n", err);
602 mana_gd_destroy_eq(gc, false, queue);
606 static void mana_gd_create_cq(const struct gdma_queue_spec *spec,
607 struct gdma_queue *queue)
609 u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);
611 queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
612 queue->cq.parent = spec->cq.parent_eq;
613 queue->cq.context = spec->cq.context;
614 queue->cq.callback = spec->cq.callback;
617 static void mana_gd_destroy_cq(struct gdma_context *gc,
618 struct gdma_queue *queue)
622 if (id >= gc->max_num_cqs)
625 if (!gc->cq_table[id])
628 gc->cq_table[id] = NULL;
631 int mana_gd_create_hwc_queue(struct gdma_dev *gd,
632 const struct gdma_queue_spec *spec,
633 struct gdma_queue **queue_ptr)
635 struct gdma_context *gc = gd->gdma_context;
636 struct gdma_mem_info *gmi;
637 struct gdma_queue *queue;
640 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
644 gmi = &queue->mem_info;
645 err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
651 queue->queue_mem_ptr = gmi->virt_addr;
652 queue->queue_size = spec->queue_size;
653 queue->monitor_avl_buf = spec->monitor_avl_buf;
654 queue->type = spec->type;
655 queue->gdma_dev = gd;
657 if (spec->type == GDMA_EQ)
658 err = mana_gd_create_eq(gd, spec, false, queue);
659 else if (spec->type == GDMA_CQ)
660 mana_gd_create_cq(spec, queue);
668 mana_gd_free_memory(gmi);
674 static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
676 struct gdma_destroy_dma_region_req req = {};
677 struct gdma_general_resp resp = {};
680 if (gdma_region == GDMA_INVALID_DMA_REGION)
683 mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
685 req.gdma_region = gdma_region;
687 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
688 if (err || resp.hdr.status)
689 dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
690 err, resp.hdr.status);
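/* Register the queue memory with the hardware as a DMA region, passing the
 * list of 4K page addresses over the HWC.
 */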
693 static int mana_gd_create_dma_region(struct gdma_dev *gd,
694 struct gdma_mem_info *gmi)
696 unsigned int num_page = gmi->length / PAGE_SIZE;
697 struct gdma_create_dma_region_req *req = NULL;
698 struct gdma_create_dma_region_resp resp = {};
699 struct gdma_context *gc = gd->gdma_context;
700 struct hw_channel_context *hwc;
701 u32 length = gmi->length;
706 if (length < PAGE_SIZE || !is_power_of_2(length))
709 if (offset_in_page(gmi->virt_addr) != 0)
712 hwc = gc->hwc.driver_data;
713 req_msg_size = struct_size(req, page_addr_list, num_page);
714 if (req_msg_size > hwc->max_req_msg_size)
717 req = kzalloc(req_msg_size, GFP_KERNEL);
721 mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
722 req_msg_size, sizeof(resp));
723 req->length = length;
724 req->offset_in_page = 0;
725 req->gdma_page_type = GDMA_PAGE_TYPE_4K;
726 req->page_count = num_page;
727 req->page_addr_list_len = num_page;
729 for (i = 0; i < num_page; i++)
730 req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
732 err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
736 if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
737 dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
743 gmi->gdma_region = resp.gdma_region;
749 int mana_gd_create_mana_eq(struct gdma_dev *gd,
750 const struct gdma_queue_spec *spec,
751 struct gdma_queue **queue_ptr)
753 struct gdma_context *gc = gd->gdma_context;
754 struct gdma_mem_info *gmi;
755 struct gdma_queue *queue;
758 if (spec->type != GDMA_EQ)
761 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
765 gmi = &queue->mem_info;
766 err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
770 err = mana_gd_create_dma_region(gd, gmi);
776 queue->queue_mem_ptr = gmi->virt_addr;
777 queue->queue_size = spec->queue_size;
778 queue->monitor_avl_buf = spec->monitor_avl_buf;
779 queue->type = spec->type;
780 queue->gdma_dev = gd;
782 err = mana_gd_create_eq(gd, spec, true, queue);
789 mana_gd_free_memory(gmi);
795 int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
796 const struct gdma_queue_spec *spec,
797 struct gdma_queue **queue_ptr)
799 struct gdma_context *gc = gd->gdma_context;
800 struct gdma_mem_info *gmi;
801 struct gdma_queue *queue;
804 if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
805 spec->type != GDMA_RQ)
808 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
812 gmi = &queue->mem_info;
813 err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
817 err = mana_gd_create_dma_region(gd, gmi);
823 queue->queue_mem_ptr = gmi->virt_addr;
824 queue->queue_size = spec->queue_size;
825 queue->monitor_avl_buf = spec->monitor_avl_buf;
826 queue->type = spec->type;
827 queue->gdma_dev = gd;
829 if (spec->type == GDMA_CQ)
830 mana_gd_create_cq(spec, queue);
835 mana_gd_free_memory(gmi);
841 void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
843 struct gdma_mem_info *gmi = &queue->mem_info;
845 switch (queue->type) {
847 mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
851 mana_gd_destroy_cq(gc, queue);
861 dev_err(gc->dev, "Can't destroy unknown queue: type=%d\n",
866 mana_gd_destroy_dma_region(gc, gmi->gdma_region);
867 mana_gd_free_memory(gmi);
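/* Negotiate the GDMA protocol version and report the driver's capability
 * flags and OS information to the hardware.
 */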
871 int mana_gd_verify_vf_version(struct pci_dev *pdev)
873 struct gdma_context *gc = pci_get_drvdata(pdev);
874 struct gdma_verify_ver_resp resp = {};
875 struct gdma_verify_ver_req req = {};
878 mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
879 sizeof(req), sizeof(resp));
881 req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
882 req.protocol_ver_max = GDMA_PROTOCOL_LAST;
884 req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
885 req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
886 req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
887 req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;
889 req.drv_ver = 0; /* Unused */
890 req.os_type = 0x10; /* Linux */
891 req.os_ver_major = LINUX_VERSION_MAJOR;
892 req.os_ver_minor = LINUX_VERSION_PATCHLEVEL;
893 req.os_ver_build = LINUX_VERSION_SUBLEVEL;
894 strscpy(req.os_ver_str1, utsname()->sysname, sizeof(req.os_ver_str1));
895 strscpy(req.os_ver_str2, utsname()->release, sizeof(req.os_ver_str2));
896 strscpy(req.os_ver_str3, utsname()->version, sizeof(req.os_ver_str3));
898 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
899 if (err || resp.hdr.status) {
900 dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
901 err, resp.hdr.status);
902 return err ? err : -EPROTO;
908 int mana_gd_register_device(struct gdma_dev *gd)
910 struct gdma_context *gc = gd->gdma_context;
911 struct gdma_register_device_resp resp = {};
912 struct gdma_general_req req = {};
915 gd->pdid = INVALID_PDID;
916 gd->doorbell = INVALID_DOORBELL;
917 gd->gpa_mkey = INVALID_MEM_KEY;
919 mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
922 req.hdr.dev_id = gd->dev_id;
924 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
925 if (err || resp.hdr.status) {
926 dev_err(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n",
927 err, resp.hdr.status);
928 return err ? err : -EPROTO;
931 gd->pdid = resp.pdid;
932 gd->gpa_mkey = resp.gpa_mkey;
933 gd->doorbell = resp.db_id;
938 int mana_gd_deregister_device(struct gdma_dev *gd)
940 struct gdma_context *gc = gd->gdma_context;
941 struct gdma_general_resp resp = {};
942 struct gdma_general_req req = {};
945 if (gd->pdid == INVALID_PDID)
948 mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
951 req.hdr.dev_id = gd->dev_id;
953 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
954 if (err || resp.hdr.status) {
955 dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
956 err, resp.hdr.status);
961 gd->pdid = INVALID_PDID;
962 gd->doorbell = INVALID_DOORBELL;
963 gd->gpa_mkey = INVALID_MEM_KEY;
968 u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
970 u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
971 u32 wq_size = wq->queue_size;
973 WARN_ON_ONCE(used_space > wq_size);
975 return wq_size - used_space;
978 u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset)
980 u32 offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);
982 WARN_ON_ONCE((offset + GDMA_WQE_BU_SIZE) > wq->queue_size);
984 return wq->queue_mem_ptr + offset;
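/* Write the WQE header and the inline client OOB data; the return value is
 * the number of bytes consumed, so the caller knows where the SGL starts.
 */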
987 static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
988 enum gdma_queue_type q_type,
989 u32 client_oob_size, u32 sgl_data_size,
992 bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
993 bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
994 struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
997 memset(header, 0, sizeof(struct gdma_wqe));
998 header->num_sge = wqe_req->num_sge;
999 header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
1002 WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
1004 header->client_oob_in_sgl = 1;
1007 header->last_vbytes = wqe_req->sgl[0].size;
1010 if (q_type == GDMA_SQ)
1011 header->client_data_unit = wqe_req->client_data_unit;
1013 /* The size of gdma_wqe + client_oob_size must be less than or equal
1014 * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
1015 * the queue memory buffer boundary.
1017 ptr = wqe_ptr + sizeof(header);
1019 if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
1020 memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);
1022 if (client_oob_size > wqe_req->inline_oob_size)
1023 memset(ptr + wqe_req->inline_oob_size, 0,
1024 client_oob_size - wqe_req->inline_oob_size);
1027 return sizeof(header) + client_oob_size;
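/* Copy the scatter-gather list into the work queue, wrapping around to the
 * beginning of the ring buffer if it would run past the end.
 */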
1030 static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr,
1031 const struct gdma_wqe_request *wqe_req)
1033 u32 sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
1034 const u8 *address = (u8 *)wqe_req->sgl;
1035 u8 *base_ptr, *end_ptr;
1038 base_ptr = wq->queue_mem_ptr;
1039 end_ptr = base_ptr + wq->queue_size;
1040 size_to_end = (u32)(end_ptr - wqe_ptr);
1042 if (size_to_end < sgl_size) {
1043 memcpy(wqe_ptr, address, size_to_end);
1046 address += size_to_end;
1047 sgl_size -= size_to_end;
1050 memcpy(wqe_ptr, address, sgl_size);
1053 int mana_gd_post_work_request(struct gdma_queue *wq,
1054 const struct gdma_wqe_request *wqe_req,
1055 struct gdma_posted_wqe_info *wqe_info)
1057 u32 client_oob_size = wqe_req->inline_oob_size;
1058 struct gdma_context *gc;
1064 if (wqe_req->num_sge == 0)
1067 if (wq->type == GDMA_RQ) {
1068 if (client_oob_size != 0)
1071 client_oob_size = INLINE_OOB_SMALL_SIZE;
1073 max_wqe_size = GDMA_MAX_RQE_SIZE;
1075 if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
1076 client_oob_size != INLINE_OOB_LARGE_SIZE)
1079 max_wqe_size = GDMA_MAX_SQE_SIZE;
1082 sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
1083 wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
1084 sgl_data_size, GDMA_WQE_BU_SIZE);
1085 if (wqe_size > max_wqe_size)
1088 if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
1089 gc = wq->gdma_dev->gdma_context;
1090 dev_err(gc->dev, "unsuccessful flow control!\n");
1095 wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
1097 wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
1098 wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
1099 sgl_data_size, wqe_ptr);
1100 if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size)
1101 wqe_ptr -= wq->queue_size;
1103 mana_gd_write_sgl(wq, wqe_ptr, wqe_req);
1105 wq->head += wqe_size / GDMA_WQE_BU_SIZE;
1110 int mana_gd_post_and_ring(struct gdma_queue *queue,
1111 const struct gdma_wqe_request *wqe_req,
1112 struct gdma_posted_wqe_info *wqe_info)
1114 struct gdma_context *gc = queue->gdma_dev->gdma_context;
1117 err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
1121 mana_gd_wq_ring_doorbell(gc, queue);
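/* Read the CQE at the current CQ head: returns 0 when no new entry is
 * available, -1 on overflow, and fills in @comp otherwise.
 */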
1126 static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
1128 unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
1129 struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
1130 u32 owner_bits, new_bits, old_bits;
1131 struct gdma_cqe *cqe;
1133 cqe = &cq_cqe[cq->head % num_cqe];
1134 owner_bits = cqe->cqe_info.owner_bits;
1136 old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
1137 /* Return 0 if no more entries. */
1138 if (owner_bits == old_bits)
1141 new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
1142 /* Return -1 if overflow detected. */
1143 if (WARN_ON_ONCE(owner_bits != new_bits))
1146 /* Per GDMA spec, rmb is necessary after checking owner_bits, before
1147 * reading completion info
1151 comp->wq_num = cqe->cqe_info.wq_num;
1152 comp->is_sq = cqe->cqe_info.is_sq;
1153 memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
1158 int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
1163 for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
1164 ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);
1167 cq->head -= cqe_idx;
1180 static irqreturn_t mana_gd_intr(int irq, void *arg)
1182 struct gdma_irq_context *gic = arg;
1185 gic->handler(gic->arg);
1190 int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r)
1192 r->map = bitmap_zalloc(res_avail, GFP_KERNEL);
1196 r->size = res_avail;
1197 spin_lock_init(&r->lock);
1202 void mana_gd_free_res_map(struct gdma_resource *r)
1204 bitmap_free(r->map);
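/* Allocate MSI-X vectors (one per queue plus one for the HWC) and request an
 * interrupt for each vector.
 */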
1209 static int mana_gd_setup_irqs(struct pci_dev *pdev)
1211 unsigned int max_queues_per_port = num_online_cpus();
1212 struct gdma_context *gc = pci_get_drvdata(pdev);
1213 struct gdma_irq_context *gic;
1214 unsigned int max_irqs;
1218 if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
1219 max_queues_per_port = MANA_MAX_NUM_QUEUES;
1221 /* Need 1 interrupt for the Hardware Channel (HWC) */
1222 max_irqs = max_queues_per_port + 1;
1224 nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
1228 gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
1230 if (!gc->irq_contexts) {
1232 goto free_irq_vector;
1235 for (i = 0; i < nvec; i++) {
1236 gic = &gc->irq_contexts[i];
1237 gic->handler = NULL;
1240 irq = pci_irq_vector(pdev, i);
1246 err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
1251 err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
1255 gc->max_num_msix = nvec;
1256 gc->num_msix_usable = nvec;
1261 for (j = i - 1; j >= 0; j--) {
1262 irq = pci_irq_vector(pdev, j);
1263 gic = &gc->irq_contexts[j];
1267 kfree(gc->irq_contexts);
1268 gc->irq_contexts = NULL;
1270 pci_free_irq_vectors(pdev);
1274 static void mana_gd_remove_irqs(struct pci_dev *pdev)
1276 struct gdma_context *gc = pci_get_drvdata(pdev);
1277 struct gdma_irq_context *gic;
1280 if (gc->max_num_msix < 1)
1283 mana_gd_free_res_map(&gc->msix_resource);
1285 for (i = 0; i < gc->max_num_msix; i++) {
1286 irq = pci_irq_vector(pdev, i);
1290 gic = &gc->irq_contexts[i];
1294 pci_free_irq_vectors(pdev);
1296 gc->max_num_msix = 0;
1297 gc->num_msix_usable = 0;
1298 kfree(gc->irq_contexts);
1299 gc->irq_contexts = NULL;
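/* Bring up the GDMA layer: initialize registers, set up IRQs, create the HWC,
 * verify the protocol version, query resources and detect devices.
 */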
1302 static int mana_gd_setup(struct pci_dev *pdev)
1304 struct gdma_context *gc = pci_get_drvdata(pdev);
1307 mana_gd_init_registers(pdev);
1308 mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
1310 err = mana_gd_setup_irqs(pdev);
1314 err = mana_hwc_create_channel(gc);
1318 err = mana_gd_verify_vf_version(pdev);
1322 err = mana_gd_query_max_resources(pdev);
1326 err = mana_gd_detect_devices(pdev);
1333 mana_hwc_destroy_channel(gc);
1335 mana_gd_remove_irqs(pdev);
1339 static void mana_gd_cleanup(struct pci_dev *pdev)
1341 struct gdma_context *gc = pci_get_drvdata(pdev);
1343 mana_hwc_destroy_channel(gc);
1345 mana_gd_remove_irqs(pdev);
1348 static bool mana_is_pf(unsigned short dev_id)
1350 return dev_id == MANA_PF_DEVICE_ID;
1353 static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1355 struct gdma_context *gc;
1356 void __iomem *bar0_va;
1360 /* Each port has 2 CQs, each CQ has at most 1 EQE at a time */
1361 BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
1363 err = pci_enable_device(pdev);
1367 pci_set_master(pdev);
1369 err = pci_request_regions(pdev, "mana");
1373 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1375 goto release_region;
1377 err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
1379 dev_err(&pdev->dev, "Failed to set dma device segment size\n");
1380 goto release_region;
1384 gc = vzalloc(sizeof(*gc));
1386 goto release_region;
1388 mutex_init(&gc->eq_test_event_mutex);
1389 pci_set_drvdata(pdev, gc);
1390 gc->bar0_pa = pci_resource_start(pdev, 0);
1392 bar0_va = pci_iomap(pdev, bar, 0);
1396 gc->is_pf = mana_is_pf(pdev->device);
1397 gc->bar0_va = bar0_va;
1398 gc->dev = &pdev->dev;
1400 err = mana_gd_setup(pdev);
1404 err = mana_probe(&gc->mana, false);
1411 mana_gd_cleanup(pdev);
1413 pci_iounmap(pdev, bar0_va);
1415 pci_set_drvdata(pdev, NULL);
1418 pci_release_regions(pdev);
1420 pci_clear_master(pdev);
1421 pci_disable_device(pdev);
1422 dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
1426 static void mana_gd_remove(struct pci_dev *pdev)
1428 struct gdma_context *gc = pci_get_drvdata(pdev);
1430 mana_remove(&gc->mana, false);
1432 mana_gd_cleanup(pdev);
1434 pci_iounmap(pdev, gc->bar0_va);
1438 pci_release_regions(pdev);
1439 pci_clear_master(pdev);
1440 pci_disable_device(pdev);
1443 /* The 'state' parameter is not used. */
1444 static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
1446 struct gdma_context *gc = pci_get_drvdata(pdev);
1448 mana_remove(&gc->mana, true);
1450 mana_gd_cleanup(pdev);
1455 /* In case the NIC hardware stops working, the suspend and resume callbacks will
1456 * fail -- if this happens, it's safer to just report an error than try to undo
1457 * what has been done.
1459 static int mana_gd_resume(struct pci_dev *pdev)
1461 struct gdma_context *gc = pci_get_drvdata(pdev);
1464 err = mana_gd_setup(pdev);
1468 err = mana_probe(&gc->mana, true);
1475 /* Quiesce the device for kexec. This is also called upon reboot/shutdown. */
1476 static void mana_gd_shutdown(struct pci_dev *pdev)
1478 struct gdma_context *gc = pci_get_drvdata(pdev);
1480 dev_info(&pdev->dev, "Shutdown was called\n");
1482 mana_remove(&gc->mana, true);
1484 mana_gd_cleanup(pdev);
1486 pci_disable_device(pdev);
1489 static const struct pci_device_id mana_id_table[] = {
1490 { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
1491 { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
1495 static struct pci_driver mana_driver = {
1497 .id_table = mana_id_table,
1498 .probe = mana_gd_probe,
1499 .remove = mana_gd_remove,
1500 .suspend = mana_gd_suspend,
1501 .resume = mana_gd_resume,
1502 .shutdown = mana_gd_shutdown,
1505 module_pci_driver(mana_driver);
1507 MODULE_DEVICE_TABLE(pci, mana_id_table);
1509 MODULE_LICENSE("Dual BSD/GPL");
1510 MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver");