drivers/net/ethernet/qlogic/qed/qed_sriov.c
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#include "qed_vf.h"

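/* Validate a relative VF index against the total number of VFs exposed by
 * the PCI SR-IOV capability; when @b_enabled_only is set, also require that
 * the VF has been initialized.
 */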
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
                           int rel_vf_id, bool b_enabled_only)
{
        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return false;
        }

        if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
            (rel_vf_id < 0))
                return false;

        if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
            b_enabled_only)
                return false;

        return true;
}

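/* Return the qed_vf_info entry for a relative VF index, or NULL if the
 * index fails qed_iov_is_valid_vfid().
 */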
static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
                                               u16 relative_vf_id,
                                               bool b_enabled_only)
{
        struct qed_vf_info *vf = NULL;

        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return NULL;
        }

        if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
                vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
        else
                DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
                       relative_vf_id);

        return vf;
}

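/* Read the PF's SR-IOV extended capability from PCI configuration space
 * and cache the relevant fields in cdev->p_iov_info.
 */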
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
        struct qed_hw_sriov_info *iov = cdev->p_iov_info;
        int pos = iov->pos;

        DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
        if (iov->num_vfs) {
                DP_VERBOSE(cdev,
                           QED_MSG_IOV,
                           "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
                iov->num_vfs = 0;
        }

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

        pci_read_config_dword(cdev->pdev,
                              pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

        pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

        pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

        DP_VERBOSE(cdev,
                   QED_MSG_IOV,
                   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
                   iov->nres,
                   iov->cap,
                   iov->ctrl,
                   iov->total_vfs,
                   iov->initial_vfs,
                   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

        /* Some sanity checks */
        if (iov->num_vfs > NUM_OF_VFS(cdev) ||
            iov->total_vfs > NUM_OF_VFS(cdev)) {
                /* This can happen only due to a bug. In this case we set
                 * num_vfs to zero to avoid memory corruption in the code that
                 * assumes max number of vfs
                 */
                DP_NOTICE(cdev,
                          "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
                          iov->num_vfs);

                iov->num_vfs = 0;
                iov->total_vfs = 0;
        }

        return 0;
}

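/* Clear the 'valid' bit in the IGU mapping memory for every status block
 * that is marked free and not owned by the PF.
 */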
static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
                                        struct qed_ptt *p_ptt)
{
        struct qed_igu_block *p_sb;
        u16 sb_id;
        u32 val;

        if (!p_hwfn->hw_info.p_igu_info) {
                DP_ERR(p_hwfn,
                       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
                return;
        }

        for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
             sb_id++) {
                p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
                if ((p_sb->status & QED_IGU_STATUS_FREE) &&
                    !(p_sb->status & QED_IGU_STATUS_PF)) {
                        val = qed_rd(p_hwfn, p_ptt,
                                     IGU_REG_MAPPING_MEMORY + sb_id * 4);
                        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                        qed_wr(p_hwfn, p_ptt,
                               IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
                }
        }
}

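/* Initialize the per-VF database: point each VF at its slice of the
 * mailbox request/reply buffers and bulletin board, and derive its
 * relative, absolute, concrete and opaque FIDs.
 */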
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        struct qed_bulletin_content *p_bulletin_virt;
        dma_addr_t req_p, rply_p, bulletin_p;
        union pfvf_tlvs *p_reply_virt_addr;
        union vfpf_tlvs *p_req_virt_addr;
        u8 idx = 0;

        memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

        p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
        req_p = p_iov_info->mbx_msg_phys_addr;
        p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
        rply_p = p_iov_info->mbx_reply_phys_addr;
        p_bulletin_virt = p_iov_info->p_bulletins;
        bulletin_p = p_iov_info->bulletins_phys;
        if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
                DP_ERR(p_hwfn,
                       "qed_iov_setup_vfdb called without allocating mem first\n");
                return;
        }

        for (idx = 0; idx < p_iov->total_vfs; idx++) {
                struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
                u32 concrete;

                vf->vf_mbx.req_virt = p_req_virt_addr + idx;
                vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
                vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
                vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

                vf->state = VF_STOPPED;
                vf->b_init = false;

                vf->bulletin.phys = idx *
                                    sizeof(struct qed_bulletin_content) +
                                    bulletin_p;
                vf->bulletin.p_virt = p_bulletin_virt + idx;
                vf->bulletin.size = sizeof(struct qed_bulletin_content);

                vf->relative_vf_id = idx;
                vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
                concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
                vf->concrete_fid = concrete;
                vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
                                 (vf->abs_vf_id << 8);
                vf->vport_id = idx + 1;
        }
}

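/* Allocate the DMA-coherent buffers shared with VFs - the per-VF mailbox
 * request and reply buffers and the bulletin boards.
 */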
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        void **p_v_addr;
        u16 num_vfs = 0;

        num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

        /* Allocate PF Mailbox buffer (per-VF) */
        p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_msg_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_msg_size,
                                       &p_iov_info->mbx_msg_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        /* Allocate PF Mailbox Reply buffer (per-VF) */
        p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_reply_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_reply_size,
                                       &p_iov_info->mbx_reply_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
                                     num_vfs;
        p_v_addr = &p_iov_info->p_bulletins;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->bulletins_size,
                                       &p_iov_info->bulletins_phys,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "PF's Requests mailbox [%p virt 0x%llx phys],  Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
                   p_iov_info->mbx_msg_virt_addr,
                   (u64) p_iov_info->mbx_msg_phys_addr,
                   p_iov_info->mbx_reply_virt_addr,
                   (u64) p_iov_info->mbx_reply_phys_addr,
                   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

        return 0;
}

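/* Release the DMA-coherent mailbox and bulletin buffers allocated by
 * qed_iov_allocate_vfdb().
 */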
static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

        if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_msg_size,
                                  p_iov_info->mbx_msg_virt_addr,
                                  p_iov_info->mbx_msg_phys_addr);

        if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_reply_size,
                                  p_iov_info->mbx_reply_virt_addr,
                                  p_iov_info->mbx_reply_phys_addr);

        if (p_iov_info->p_bulletins)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->bulletins_size,
                                  p_iov_info->p_bulletins,
                                  p_iov_info->bulletins_phys);
}

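/* Allocate the PF's IOV database and the per-VF buffers, but only if the
 * function actually supports SR-IOV.
 */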
int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_sriov;

        if (!IS_PF_SRIOV(p_hwfn)) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "No SR-IOV - no need for IOV db\n");
                return 0;
        }

        p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
        if (!p_sriov) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
                return -ENOMEM;
        }

        p_hwfn->pf_iov_info = p_sriov;

        return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
                return;

        qed_iov_setup_vfdb(p_hwfn);
        qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
        if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
                qed_iov_free_vfdb(p_hwfn);
                kfree(p_hwfn->pf_iov_info);
        }
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
        kfree(cdev->p_iov_info);
        cdev->p_iov_info = NULL;
}

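/* Learn the SR-IOV configuration from PCI config space and compute the
 * index of the first VF that belongs to this PF.
 */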
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        int pos;
        int rc;

        /* Learn the PCI configuration */
        pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
                                      PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
                return 0;
        }

        /* Allocate a new struct for IOV information */
        cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
        if (!cdev->p_iov_info) {
                DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
                return -ENOMEM;
        }
        cdev->p_iov_info->pos = pos;

        rc = qed_iov_pci_cfg_info(cdev);
        if (rc)
                return rc;

        /* We want PF IOV to be synonymous with the existence of p_iov_info;
         * in case the capability is published but there are no VFs, simply
         * de-allocate the struct.
         */
        if (!cdev->p_iov_info->total_vfs) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "IOV capabilities, but no VFs are published\n");
                kfree(cdev->p_iov_info);
                cdev->p_iov_info = NULL;
                return 0;
        }

        /* Calculate the first VF index - this is a bit tricky; Basically,
         * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
         * after the first engine's VFs.
         */
        cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
                                           p_hwfn->abs_pf_id - 16;
        if (QED_PATH_ID(p_hwfn))
                cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "First VF in hwfn 0x%08x\n",
                   cdev->p_iov_info->first_vf_in_pf);

        return 0;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
        /* Check PF supports sriov */
        if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn))
                return false;

        /* Check VF validity */
        if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
                return false;

        return true;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
        return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
        struct channel_tlv *tl = (struct channel_tlv *)*offset;

        tl->type = type;
        tl->length = length;

        /* Offset should keep pointing to next TLV (the end of the last) */
        *offset += length;

        /* Return a pointer to the start of the added tlv */
        return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
        u16 i = 1, total_length = 0;
        struct channel_tlv *tlv;

        do {
                tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

                /* output tlv */
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "TLV number %d: type %d, length %d\n",
                           i, tlv->type, tlv->length);

                if (tlv->type == CHANNEL_TLV_LIST_END)
                        return;

                /* Validate entry - protect against malicious VFs */
                if (!tlv->length) {
                        DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
                        return;
                }

                total_length += tlv->length;

                if (total_length >= sizeof(struct tlv_buffer_size)) {
                        DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
                        return;
                }

                i++;
        } while (1);
}

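/* DMA the prepared reply from the PF's per-VF reply buffer to the address
 * the VF provided in its request; the first quadword, which carries the
 * status, is copied last, and the VF->PF channel is then marked ready.
 */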
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct qed_vf_info *p_vf,
                                  u16 length, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
        struct qed_dmae_params params;
        u8 eng_vf_id;

        mbx->reply_virt->default_resp.hdr.status = status;

        qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

        eng_vf_id = p_vf->abs_vf_id;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = QED_DMAE_FLAG_VF_DST;
        params.dst_vfid = eng_vf_id;

        qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
                           mbx->req_virt->first_tlv.reply_address +
                           sizeof(u64),
                           (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
                           &params);

        qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
                           mbx->req_virt->first_tlv.reply_address,
                           sizeof(u64) / 4, &params);

        REG_WR(p_hwfn,
               GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_vf_info *vf_info,
                                 u16 type, u16 length, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

        mbx->offset = (u8 *)mbx->reply_virt;

        qed_add_tlv(p_hwfn, &mbx->offset, type, length);
        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static void qed_iov_process_mbx_dummy_resp(struct qed_hwfn *p_hwfn,
                                           struct qed_ptt *p_ptt,
                                           struct qed_vf_info *p_vf)
{
        qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_NONE,
                             sizeof(struct pfvf_def_resp_tlv),
                             PFVF_STATUS_SUCCESS);
}

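/* Handle a request that has been copied into the VF's mailbox: send a
 * dummy response for known TLV types and log unknown ones.
 */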
static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt, int vfid)
{
        struct qed_iov_vf_mbx *mbx;
        struct qed_vf_info *p_vf;
        int i;

        p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
        if (!p_vf)
                return;

        mbx = &p_vf->vf_mbx;

        /* qed_iov_process_mbx_request */
        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);

        mbx->first_tlv = mbx->req_virt->first_tlv;

        /* check if tlv type is known */
        if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
                qed_iov_process_mbx_dummy_resp(p_hwfn, p_ptt, p_vf);
        } else {
                /* unknown TLV - this may belong to a VF driver from the future
                 * - a version written after this PF driver was written, which
                 * supports features unknown as of yet. Too bad since we don't
                 * support them. Or this may be because someone wrote a crappy
                 * VF driver and is sending garbage over the channel.
                 */
                DP_ERR(p_hwfn,
                       "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
                       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);

                for (i = 0; i < 20; i++) {
                        DP_VERBOSE(p_hwfn,
                                   QED_MSG_IOV,
                                   "%x ",
                                   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
                }
        }
}

void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
        u64 add_bit = 1ULL << (vfid % 64);

        p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
}

static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
                                                    u64 *events)
{
        u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;

        memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
        memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
}

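/* EQ-time handling of a VF->PF channel message: validate the VF, record
 * the DMA address of its request, mark the VF as pending and schedule the
 * IOV workqueue to process it.
 */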
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
                              u16 abs_vfid, struct regpair *vf_msg)
{
        u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
        struct qed_vf_info *p_vf;

        if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
                           abs_vfid);
                return 0;
        }
        p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];

        /* List the physical address of the request so that handler
         * could later on copy the message from it.
         */
        p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

        /* Mark the event and schedule the workqueue */
        qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
        qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

        return 0;
}

int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
                        u8 opcode, __le16 echo, union event_ring_data *data)
{
        switch (opcode) {
        case COMMON_EVENT_VF_PF_CHANNEL:
                return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
                                          &data->vf_pf_channel.msg_addr);
        default:
                DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
                        opcode);
                return -EINVAL;
        }
}

u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
        struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
        u16 i;

        if (!p_iov)
                goto out;

        for (i = rel_vf_id; i < p_iov->total_vfs; i++)
                if (qed_iov_is_valid_vfid(p_hwfn, i, true))
                        return i;

out:
        return MAX_NUM_VFS;
}

static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
                               int vfid)
{
        struct qed_dmae_params params;
        struct qed_vf_info *vf_info;

        vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
        if (!vf_info)
                return -EINVAL;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
        params.src_vfid = vf_info->abs_vf_id;

        if (qed_dmae_host2host(p_hwfn, ptt,
                               vf_info->vf_mbx.pending_req,
                               vf_info->vf_mbx.req_phys,
                               sizeof(union vfpf_tlvs) / 4, &params)) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Failed to copy message from VF 0x%02x\n", vfid);

                return -EIO;
        }

        return 0;
}

/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
        smp_mb__before_atomic();
        set_bit(flag, &hwfn->iov_task_flags);
        smp_mb__after_atomic();
        DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
        queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}

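/* Workqueue handler: for each VF with a pending event, copy its request
 * into the PF's mailbox buffer and process it.
 */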
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
        u64 events[QED_VF_ARRAY_LENGTH];
        struct qed_ptt *ptt;
        int i;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt) {
                DP_VERBOSE(hwfn, QED_MSG_IOV,
                           "Can't acquire PTT; re-scheduling\n");
                qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
                return;
        }

        qed_iov_pf_get_and_clear_pending_events(hwfn, events);

        DP_VERBOSE(hwfn, QED_MSG_IOV,
                   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
                   events[0], events[1], events[2]);

        qed_for_each_vf(hwfn, i) {
                /* Skip VFs with no pending messages */
                if (!(events[i / 64] & (1ULL << (i % 64))))
                        continue;

                DP_VERBOSE(hwfn, QED_MSG_IOV,
                           "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
                           i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

                /* Copy VF's message to PF's request buffer for that VF */
                if (qed_iov_copy_vf_msg(hwfn, ptt, i))
                        continue;

                qed_iov_process_mbx_req(hwfn, ptt, i);
        }

        qed_ptt_release(hwfn, ptt);
}

void qed_iov_pf_task(struct work_struct *work)
{
        struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
                                             iov_task.work);

        if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
                return;

        if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
                qed_handle_vf_msg(hwfn);
}

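/* Flush and destroy the per-hwfn IOV workqueues. When @schedule_first is
 * set, schedule a final STOP_WQ pass and cancel any outstanding delayed
 * work before tearing the queue down.
 */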
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
        int i;

        for_each_hwfn(cdev, i) {
                if (!cdev->hwfns[i].iov_wq)
                        continue;

                if (schedule_first) {
                        qed_schedule_iov(&cdev->hwfns[i],
                                         QED_IOV_WQ_STOP_WQ_FLAG);
                        cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
                }

                flush_workqueue(cdev->hwfns[i].iov_wq);
                destroy_workqueue(cdev->hwfns[i].iov_wq);
        }
}

int qed_iov_wq_start(struct qed_dev *cdev)
{
        char name[NAME_SIZE];
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                /* PFs need a dedicated workqueue only if they support IOV. */
                if (!IS_PF_SRIOV(p_hwfn))
                        continue;

                snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
                         cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

                p_hwfn->iov_wq = create_singlethread_workqueue(name);
                if (!p_hwfn->iov_wq) {
                        DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
                        return -ENOMEM;
                }

                INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
        }

        return 0;
}