// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include "ifcvf_base.h"
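
/*
 * MMIO accessors: thin wrappers around ioread*()/iowrite*() that take the
 * __le16/__le32 annotated register pointers used by the virtio PCI common
 * config layout.
 */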
static inline u8 ifc_ioread8(u8 __iomem *addr)
{
	return ioread8(addr);
}

static inline u16 ifc_ioread16(__le16 __iomem *addr)
{
	return ioread16(addr);
}

static inline u32 ifc_ioread32(__le32 __iomem *addr)
{
	return ioread32(addr);
}

static inline void ifc_iowrite8(u8 value, u8 __iomem *addr)
{
	iowrite8(value, addr);
}

static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr)
{
	iowrite16(value, addr);
}

static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr)
{
	iowrite32(value, addr);
}

static void ifc_iowrite64_twopart(u64 val,
				  __le32 __iomem *lo, __le32 __iomem *hi)
{
	ifc_iowrite32((u32)val, lo);
	ifc_iowrite32(val >> 32, hi);
}
struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
{
	return container_of(hw, struct ifcvf_adapter, vf);
}
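
/*
 * Translate a virtio PCI capability into a kernel virtual address inside the
 * already-mapped BAR, validating the BAR index and that offset + length stay
 * within the BAR.
 */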
static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
				  struct virtio_pci_cap *cap)
{
	struct ifcvf_adapter *ifcvf;
	struct pci_dev *pdev;
	u32 length, offset;
	u8 bar;

	length = le32_to_cpu(cap->length);
	offset = le32_to_cpu(cap->offset);
	bar = cap->bar;

	ifcvf = vf_to_adapter(hw);
	pdev = ifcvf->pdev;

	if (bar >= IFCVF_PCI_MAX_RESOURCE) {
		IFCVF_DBG(pdev,
			  "Invalid bar number %u to get capabilities\n", bar);
		return NULL;
	}

	if (offset + length > pci_resource_len(pdev, bar)) {
		IFCVF_DBG(pdev,
			  "offset(%u) + len(%u) overflows bar%u's capability\n",
			  offset, length, bar);
		return NULL;
	}

	return hw->base[bar] + offset;
}
static int ifcvf_read_config_range(struct pci_dev *dev,
				   uint32_t *val, int size, int where)
{
	int ret, i;

	for (i = 0; i < size; i += 4) {
		ret = pci_read_config_dword(dev, where + i, val + i / 4);
		if (ret < 0)
			return ret;
	}

	return 0;
}
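
/*
 * Walk the device's PCI capability list and map the virtio modern layout:
 * common config, notification area, ISR and device-specific (net) config.
 * Also cache the per-queue notify addresses and the LM (live migration) BAR.
 */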
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
	struct virtio_pci_cap cap;
	u16 notify_off;
	int ret;
	u8 pos;
	u32 i;

	ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
		return -EIO;
	}

	while (pos) {
		ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
					      sizeof(cap), pos);
		if (ret < 0) {
			IFCVF_ERR(pdev,
				  "Failed to get PCI capability at %x\n", pos);
			break;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR)
			goto next;

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
				  hw->common_cfg);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			pci_read_config_dword(pdev, pos + sizeof(cap),
					      &hw->notify_off_multiplier);
			hw->notify_bar = cap.bar;
			hw->notify_base = get_cap_addr(hw, &cap);
			hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
					le32_to_cpu(cap.offset);
			IFCVF_DBG(pdev, "hw->notify_base = %p\n",
				  hw->notify_base);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->net_cfg = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->net_cfg = %p\n", hw->net_cfg);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->isr == NULL || hw->net_cfg == NULL) {
		IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
		return -EIO;
	}

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
		ifc_iowrite16(i, &hw->common_cfg->queue_select);
		notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
		hw->vring[i].notify_addr = hw->notify_base +
			notify_off * hw->notify_off_multiplier;
		hw->vring[i].notify_pa = hw->notify_base_pa +
			notify_off * hw->notify_off_multiplier;
	}

	hw->lm_cfg = hw->base[IFCVF_LM_BAR];

	IFCVF_DBG(pdev,
		  "PCI capability mapping: common cfg: %p, notify base: %p, isr cfg: %p, device cfg: %p, multiplier: %u\n",
		  hw->common_cfg, hw->notify_base, hw->isr,
		  hw->net_cfg, hw->notify_off_multiplier);

	return 0;
}
u8 ifcvf_get_status(struct ifcvf_hw *hw)
{
	return ifc_ioread8(&hw->common_cfg->device_status);
}

void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
	ifc_iowrite8(status, &hw->common_cfg->device_status);
}
void ifcvf_reset(struct ifcvf_hw *hw)
{
	hw->config_cb.callback = NULL;
	hw->config_cb.private = NULL;

	ifcvf_set_status(hw, 0);
	/* flush set_status, make sure VF is stopped, reset */
	ifcvf_get_status(hw);
}
static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
{
	if (status != 0)
		status |= ifcvf_get_status(hw);

	ifcvf_set_status(hw, status);
	ifcvf_get_status(hw);
}
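
/*
 * The device features are 64 bits wide, but the common config exposes them
 * through a 32-bit window: select bank 0 and bank 1 in turn and stitch the
 * two halves together.
 */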
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u32 features_lo, features_hi;
	u64 features;

	ifc_iowrite32(0, &cfg->device_feature_select);
	features_lo = ifc_ioread32(&cfg->device_feature);

	ifc_iowrite32(1, &cfg->device_feature_select);
	features_hi = ifc_ioread32(&cfg->device_feature);

	features = ((u64)features_hi << 32) | features_lo;

	return features;
}

u64 ifcvf_get_features(struct ifcvf_hw *hw)
{
	return hw->hw_features;
}
int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
{
	struct ifcvf_adapter *ifcvf = vf_to_adapter(hw);

	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		IFCVF_ERR(ifcvf->pdev,
			  "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EINVAL;
	}

	return 0;
}
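
/*
 * Read the device-specific (virtio-net) config space byte by byte, retrying
 * until config_generation is unchanged across the read so the snapshot is
 * consistent.
 */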
void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
			   void *dst, int length)
{
	u8 old_gen, new_gen, *p;
	int i;

	WARN_ON(offset + length > sizeof(struct virtio_net_config));
	do {
		old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
		p = dst;
		for (i = 0; i < length; i++)
			*p++ = ifc_ioread8(hw->net_cfg + offset + i);

		new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}
void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
			    const void *src, int length)
{
	const u8 *p;
	int i;

	p = src;
	WARN_ON(offset + length > sizeof(struct virtio_net_config));
	for (i = 0; i < length; i++)
		ifc_iowrite8(*p++, hw->net_cfg + offset + i);
}
static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	ifc_iowrite32(0, &cfg->guest_feature_select);
	ifc_iowrite32((u32)features, &cfg->guest_feature);

	ifc_iowrite32(1, &cfg->guest_feature_select);
	ifc_iowrite32(features >> 32, &cfg->guest_feature);
}
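
/*
 * Write the requested driver features and set FEATURES_OK, then re-read the
 * status to verify that the device accepted the feature set.
 */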
static int ifcvf_config_features(struct ifcvf_hw *hw)
{
	struct ifcvf_adapter *ifcvf;

	ifcvf = vf_to_adapter(hw);
	ifcvf_set_features(hw, hw->req_features);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);

	if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
		IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
		return -EIO;
	}

	return 0;
}
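
/*
 * The device mirrors each queue's last_avail_idx in the LM BAR, so the
 * queue state can be read back and restored.
 */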
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
	struct ifcvf_lm_cfg __iomem *ifcvf_lm;
	void __iomem *avail_idx_addr;
	u16 last_avail_idx;
	u32 q_pair_id;

	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
	q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
	last_avail_idx = ifc_ioread16(avail_idx_addr);

	return last_avail_idx;
}
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
	struct ifcvf_lm_cfg __iomem *ifcvf_lm;
	void __iomem *avail_idx_addr;
	u32 q_pair_id;

	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
	q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
	hw->vring[qid].last_avail_idx = num;
	ifc_iowrite16(num, avail_idx_addr);

	return 0;
}
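
/*
 * Program the config MSI-X vector and, for every ring the caller has marked
 * ready, write the descriptor/avail/used addresses, queue size, MSI-X vector
 * and the saved avail index, then enable the queue. Reading a vector back as
 * VIRTIO_MSI_NO_VECTOR means the device could not take it.
 */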
static int ifcvf_hw_enable(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg;
	struct ifcvf_adapter *ifcvf;
	u32 i;

	ifcvf = vf_to_adapter(hw);
	cfg = hw->common_cfg;
	ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);

	if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n");
		return -EINVAL;
	}

	for (i = 0; i < hw->nr_vring; i++) {
		if (!hw->vring[i].ready)
			break;

		ifc_iowrite16(i, &cfg->queue_select);
		ifc_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
				      &cfg->queue_desc_hi);
		ifc_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
				      &cfg->queue_avail_hi);
		ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
				      &cfg->queue_used_hi);
		ifc_iowrite16(hw->vring[i].size, &cfg->queue_size);
		ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector);

		if (ifc_ioread16(&cfg->queue_msix_vector) ==
		    VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(ifcvf->pdev,
				  "No msix vector for queue %u\n", i);
			return -EINVAL;
		}

		ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
		ifc_iowrite16(1, &cfg->queue_enable);
	}

	return 0;
}
static void ifcvf_hw_disable(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg;
	u32 i;

	cfg = hw->common_cfg;
	ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->msix_config);

	for (i = 0; i < hw->nr_vring; i++) {
		ifc_iowrite16(i, &cfg->queue_select);
		ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->queue_msix_vector);
	}

	ifc_ioread16(&cfg->queue_msix_vector);
}
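
/*
 * Bring the device up following the virtio status sequence: reset, then
 * ACKNOWLEDGE and DRIVER, negotiate features (FEATURES_OK), program the
 * queues, and finally set DRIVER_OK.
 */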
int ifcvf_start_hw(struct ifcvf_hw *hw)
{
	ifcvf_reset(hw);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);

	if (ifcvf_config_features(hw) < 0)
		return -EINVAL;

	if (ifcvf_hw_enable(hw) < 0)
		return -EINVAL;

	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);

	return 0;
}
void ifcvf_stop_hw(struct ifcvf_hw *hw)
{
	ifcvf_hw_disable(hw);
	ifcvf_reset(hw);
}
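
/* Kick the device: write the queue index to the queue's doorbell address. */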
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
	ifc_iowrite16(qid, hw->vring[qid].notify_addr);
}