// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 */

#include "ifcvf_base.h"

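/* Map a virtqueue's interrupt to an MSI-X vector and read the mapping
 * back: per the virtio spec, the device reports VIRTIO_MSI_NO_VECTOR
 * if it could not set up the vector, so callers should check the
 * returned value.
 */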
u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

        vp_iowrite16(qid, &cfg->queue_select);
        vp_iowrite16(vector, &cfg->queue_msix_vector);

        return vp_ioread16(&cfg->queue_msix_vector);
}

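/* Same read-back contract as ifcvf_set_vq_vector(), but for the
 * config-change interrupt vector.
 */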
u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

        vp_iowrite16(vector, &cfg->msix_config);

        return vp_ioread16(&cfg->msix_config);
}

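/* Translate a virtio PCI capability into a pointer inside the
 * already-mapped BARs, rejecting out-of-range BAR numbers and
 * offset/length pairs that overflow the BAR.
 */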
static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
                                  struct virtio_pci_cap *cap)
{
        u32 length, offset;
        u8 bar;

        length = le32_to_cpu(cap->length);
        offset = le32_to_cpu(cap->offset);
        bar = cap->bar;

        if (bar >= IFCVF_PCI_MAX_RESOURCE) {
                IFCVF_DBG(hw->pdev,
                          "Invalid bar number %u to get capabilities\n", bar);
                return NULL;
        }

        if (offset + length > pci_resource_len(hw->pdev, bar)) {
                IFCVF_DBG(hw->pdev,
                          "offset(%u) + len(%u) overflows bar%u's capability\n",
                          offset, length, bar);
                return NULL;
        }

        return hw->base[bar] + offset;
}

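/* Read @size bytes of PCI config space at @where, one dword at a
 * time. @size is expected to be a multiple of 4; the only caller
 * passes sizeof(struct virtio_pci_cap).
 */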
static int ifcvf_read_config_range(struct pci_dev *dev,
                                   uint32_t *val, int size, int where)
{
        int ret, i;

        for (i = 0; i < size; i += 4) {
                ret = pci_read_config_dword(dev, where + i, val + i / 4);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

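/* Select a virtqueue and read the queue size the device reports
 * for it.
 */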
static u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
{
        u16 queue_size;

        vp_iowrite16(qid, &hw->common_cfg->queue_select);
        queue_size = vp_ioread16(&hw->common_cfg->queue_size);

        return queue_size;
}

/* Returns the maximum virtqueue size that is safe for all
 * virtqueues: the smallest non-zero queue size reported by
 * the device across every virtqueue.
 */
u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
{
        u16 queue_size, max_size, qid;

        max_size = ifcvf_get_vq_size(hw, 0);
        for (qid = 1; qid < hw->nr_vring; qid++) {
                queue_size = ifcvf_get_vq_size(hw, qid);
                /* 0 means the queue is unavailable */
                if (!queue_size)
                        continue;

                max_size = min(queue_size, max_size);
        }

        return max_size;
}

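/* Discover and map the device's virtio-modern capabilities.
 *
 * Walk the PCI capability list, map the common, notify, ISR and
 * device config regions, then precompute every virtqueue's notify
 * address (and its physical counterpart) from queue_notify_off and
 * notify_off_multiplier. Returns 0 on success, a negative errno on
 * failure.
 */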
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
        struct virtio_pci_cap cap;
        u16 notify_off;
        int ret;
        u8 pos;
        u32 i;

        ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
                return -EIO;
        }
        hw->pdev = pdev;

        while (pos) {
                ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
                                              sizeof(cap), pos);
                if (ret < 0) {
                        IFCVF_ERR(pdev,
                                  "Failed to get PCI capability at %x\n", pos);
                        break;
                }

                if (cap.cap_vndr != PCI_CAP_ID_VNDR)
                        goto next;

                switch (cap.cfg_type) {
                case VIRTIO_PCI_CAP_COMMON_CFG:
                        hw->common_cfg = get_cap_addr(hw, &cap);
                        IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
                                  hw->common_cfg);
                        break;
                case VIRTIO_PCI_CAP_NOTIFY_CFG:
                        pci_read_config_dword(pdev, pos + sizeof(cap),
                                              &hw->notify_off_multiplier);
                        hw->notify_bar = cap.bar;
                        hw->notify_base = get_cap_addr(hw, &cap);
                        hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
                                        le32_to_cpu(cap.offset);
                        IFCVF_DBG(pdev, "hw->notify_base = %p\n",
                                  hw->notify_base);
                        break;
                case VIRTIO_PCI_CAP_ISR_CFG:
                        hw->isr = get_cap_addr(hw, &cap);
                        IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
                        break;
                case VIRTIO_PCI_CAP_DEVICE_CFG:
                        hw->dev_cfg = get_cap_addr(hw, &cap);
                        hw->cap_dev_config_size = le32_to_cpu(cap.length);
                        IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
                        break;
                }

next:
                pos = cap.cap_next;
        }

        if (hw->common_cfg == NULL || hw->notify_base == NULL ||
            hw->isr == NULL || hw->dev_cfg == NULL) {
                IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
                return -EIO;
        }

        hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);
        hw->vring = kcalloc(hw->nr_vring, sizeof(struct vring_info), GFP_KERNEL);
        if (!hw->vring)
                return -ENOMEM;

        for (i = 0; i < hw->nr_vring; i++) {
                vp_iowrite16(i, &hw->common_cfg->queue_select);
                notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off);
                hw->vring[i].notify_addr = hw->notify_base +
                        notify_off * hw->notify_off_multiplier;
                hw->vring[i].notify_pa = hw->notify_base_pa +
                        notify_off * hw->notify_off_multiplier;
                hw->vring[i].irq = -EINVAL;
        }

        hw->lm_cfg = hw->base[IFCVF_LM_BAR];

        IFCVF_DBG(pdev,
                  "PCI capability mapping: common cfg: %p, notify base: %p, isr cfg: %p, device cfg: %p, multiplier: %u\n",
                  hw->common_cfg, hw->notify_base, hw->isr,
                  hw->dev_cfg, hw->notify_off_multiplier);

        hw->vqs_reused_irq = -EINVAL;
        hw->config_irq = -EINVAL;

        return 0;
}

u8 ifcvf_get_status(struct ifcvf_hw *hw)
{
        return vp_ioread8(&hw->common_cfg->device_status);
}

void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
        vp_iowrite8(status, &hw->common_cfg->device_status);
}

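/* Writing 0 to device_status resets the device; per the virtio spec,
 * the reset is only complete once reading device_status returns 0
 * again, so poll for that.
 */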
void ifcvf_reset(struct ifcvf_hw *hw)
{
        ifcvf_set_status(hw, 0);
        while (ifcvf_get_status(hw))
                msleep(1);
}

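/* Device features are exposed as two 32-bit banks selected by
 * device_feature_select; read both banks and combine them into a
 * 64-bit feature word.
 */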
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
        u32 features_lo, features_hi;
        u64 features;

        vp_iowrite32(0, &cfg->device_feature_select);
        features_lo = vp_ioread32(&cfg->device_feature);

        vp_iowrite32(1, &cfg->device_feature_select);
        features_hi = vp_ioread32(&cfg->device_feature);

        features = ((u64)features_hi << 32) | features_lo;

        return features;
}

/* return provisioned vDPA dev features */
u64 ifcvf_get_dev_features(struct ifcvf_hw *hw)
{
        return hw->dev_features;
}

u64 ifcvf_get_driver_features(struct ifcvf_hw *hw)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
        u32 features_lo, features_hi;
        u64 features;

        /* Driver features are banked via guest_feature_select;
         * device_feature_select only selects the device feature bank.
         */
        vp_iowrite32(0, &cfg->guest_feature_select);
        features_lo = vp_ioread32(&cfg->guest_feature);

        vp_iowrite32(1, &cfg->guest_feature_select);
        features_hi = vp_ioread32(&cfg->guest_feature);

        features = ((u64)features_hi << 32) | features_lo;

        return features;
}

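/* A features word of 0 (negotiation not started) is acceptable;
 * otherwise VIRTIO_F_ACCESS_PLATFORM must have been negotiated.
 */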
int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
{
        if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
                IFCVF_ERR(hw->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
                return -EINVAL;
        }

        return 0;
}

u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
        u32 net_config_size = sizeof(struct virtio_net_config);
        u32 blk_config_size = sizeof(struct virtio_blk_config);
        u32 cap_size = hw->cap_dev_config_size;
        u32 config_size;

        /* If the on-board device config space is larger than
         * struct virtio_net_config/virtio_blk_config, return only
         * the size the spec-defined contents occupy. This is very
         * unlikely in practice; it is defensive programming.
         */
        switch (hw->dev_type) {
        case VIRTIO_ID_NET:
                config_size = min(cap_size, net_config_size);
                break;
        case VIRTIO_ID_BLOCK:
                config_size = min(cap_size, blk_config_size);
                break;
        default:
                config_size = 0;
                IFCVF_ERR(hw->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
        }

        return config_size;
}

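/* Read the device config space under the config_generation counter:
 * per the virtio spec, if the generation changes while reading, the
 * snapshot may be torn, so retry until it is stable.
 */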
void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
                           void *dst, int length)
{
        u8 old_gen, new_gen, *p;
        int i;

        WARN_ON(offset + length > hw->config_size);
        do {
                old_gen = vp_ioread8(&hw->common_cfg->config_generation);
                p = dst;
                for (i = 0; i < length; i++)
                        *p++ = vp_ioread8(hw->dev_cfg + offset + i);

                new_gen = vp_ioread8(&hw->common_cfg->config_generation);
        } while (old_gen != new_gen);
}

void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
                            const void *src, int length)
{
        const u8 *p;
        int i;

        p = src;
        WARN_ON(offset + length > hw->config_size);
        for (i = 0; i < length; i++)
                vp_iowrite8(*p++, hw->dev_cfg + offset + i);
}

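/* Write the negotiated driver features to the device as two 32-bit
 * banked writes through guest_feature_select.
 */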
void ifcvf_set_driver_features(struct ifcvf_hw *hw, u64 features)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

        vp_iowrite32(0, &cfg->guest_feature_select);
        vp_iowrite32((u32)features, &cfg->guest_feature);

        vp_iowrite32(1, &cfg->guest_feature_select);
        vp_iowrite32(features >> 32, &cfg->guest_feature);
}

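/* Virtqueue state (the last_avail_idx used to resume a queue, e.g.
 * after live migration) lives in the device's LM BAR; these helpers
 * access the per-queue slot in that region.
 */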
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
        struct ifcvf_lm_cfg __iomem *lm_cfg = hw->lm_cfg;
        u16 last_avail_idx;

        last_avail_idx = vp_ioread16(&lm_cfg->vq_state_region + qid * 2);

        return last_avail_idx;
}

int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
        struct ifcvf_lm_cfg __iomem *lm_cfg = hw->lm_cfg;

        vp_iowrite16(num, &lm_cfg->vq_state_region + qid * 2);

        return 0;
}

void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

        vp_iowrite16(qid, &cfg->queue_select);
        vp_iowrite16(num, &cfg->queue_size);
}

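/* Program the descriptor, driver (avail) and device (used) area
 * addresses of a virtqueue as lo/hi 32-bit pairs.
 */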
int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
                         u64 driver_area, u64 device_area)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

        vp_iowrite16(qid, &cfg->queue_select);
        vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
                             &cfg->queue_desc_hi);
        vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
                             &cfg->queue_avail_hi);
        vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
                             &cfg->queue_used_hi);

        return 0;
}

bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
        u16 queue_enable;

        vp_iowrite16(qid, &cfg->queue_select);
        queue_enable = vp_ioread16(&cfg->queue_enable);

        return (bool)queue_enable;
}

void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
{
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

        vp_iowrite16(qid, &cfg->queue_select);
        vp_iowrite16(ready, &cfg->queue_enable);
}

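/* Detach the callbacks and MSI-X vectors from all virtqueues. */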
static void ifcvf_reset_vring(struct ifcvf_hw *hw)
{
        u16 qid;

        for (qid = 0; qid < hw->nr_vring; qid++) {
                hw->vring[qid].cb.callback = NULL;
                hw->vring[qid].cb.private = NULL;
                ifcvf_set_vq_vector(hw, qid, VIRTIO_MSI_NO_VECTOR);
        }
}

static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
{
        hw->config_cb.callback = NULL;
        hw->config_cb.private = NULL;
        ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
}

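/* Wait for in-flight interrupt handlers on every allocated MSI-X
 * vector to complete.
 */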
static void ifcvf_synchronize_irq(struct ifcvf_hw *hw)
{
        u32 nvectors = hw->num_msix_vectors;
        struct pci_dev *pdev = hw->pdev;
        int i, irq;

        for (i = 0; i < nvectors; i++) {
                irq = pci_irq_vector(pdev, i);
                if (irq >= 0)
                        synchronize_irq(irq);
        }
}

void ifcvf_stop(struct ifcvf_hw *hw)
{
        ifcvf_synchronize_irq(hw);
        ifcvf_reset_vring(hw);
        ifcvf_reset_config_handler(hw);
}

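/* Kick a virtqueue by writing its index to the queue's precomputed
 * notify address.
 */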
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
        vp_iowrite16(qid, hw->vring[qid].notify_addr);
}