// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"

#define ADF_DH895XCC_EP_OFFSET  0x3A000
#define ADF_DH895XCC_ERRMSK3    (ADF_DH895XCC_EP_OFFSET + 0x1C)
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) (((vf_mask) & 0xFFFF) << 9)
#define ADF_DH895XCC_ERRMSK5    (ADF_DH895XCC_EP_OFFSET + 0xDC)
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) ((vf_mask) >> 16)

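/*
 * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
 * @accel_dev: Pointer to acceleration device.
 *
 * Clear the VF interrupt mask CSR so that messages signalled by the PF
 * can raise an interrupt on this function.
 */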
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
        struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *pmisc_bar_addr =
                pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;

        ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
}

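/*
 * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
 * @accel_dev: Pointer to acceleration device.
 *
 * Set the VF interrupt mask CSR so that messages signalled by the PF
 * no longer raise an interrupt on this function.
 */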
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
        struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *pmisc_bar_addr =
                pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;

        ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
}

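/*
 * adf_enable_vf2pf_interrupts() - Enable VF to PF interrupts
 * @accel_dev: Pointer to acceleration device.
 * @vf_mask:   Bitmask with one bit per VF to enable.
 *
 * Clear the per-VF bits in the ERRMSK3/ERRMSK5 CSRs so that VF2PF
 * messages from the selected VFs raise an interrupt on the PF.
 */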
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
                                 u32 vf_mask)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
                        &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        void __iomem *pmisc_addr = pmisc->virt_addr;
        u32 reg;

        /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
        if (vf_mask & 0xFFFF) {
                reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
                reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
                ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
        }

        /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
        if (vf_mask >> 16) {
                reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
                reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
                ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
        }
}

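/*
 * adf_disable_vf2pf_interrupts() - Disable VF to PF interrupts
 * @accel_dev: Pointer to acceleration device.
 * @vf_mask:   Bitmask with one bit per VF to disable.
 *
 * Set the per-VF bits in the ERRMSK3/ERRMSK5 CSRs so that VF2PF
 * messages from the selected VFs no longer raise an interrupt on the PF.
 */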
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
                        &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        void __iomem *pmisc_addr = pmisc->virt_addr;
        u32 reg;

        /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
        if (vf_mask & 0xFFFF) {
                reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
                        ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
                ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
        }

        /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
        if (vf_mask >> 16) {
                reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
                        ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
                ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
        }
}

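/*
 * __adf_iov_putmsg() - Send a single PFVF message
 * @accel_dev: Pointer to acceleration device.
 * @msg:       Message to send.
 * @vf_nr:     VF number to send the message to (ignored when called on a VF).
 *
 * The PF2VF CSR is shared by both directions, so ownership is arbitrated
 * with an in-use pattern: claim the CSR, wait for a possible collision,
 * then write the message with the interrupt bit set and poll until the
 * remote function clears the interrupt bit as an ACK.
 */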
static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
        struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *pmisc_bar_addr =
                pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
        u32 val, pf2vf_offset, count = 0;
        u32 local_in_use_mask, local_in_use_pattern;
        u32 remote_in_use_mask, remote_in_use_pattern;
        struct mutex *lock;     /* lock preventing concurrent access to the CSR */
        u32 int_bit;
        int ret = 0;

        if (accel_dev->is_vf) {
                pf2vf_offset = hw_data->get_pf2vf_offset(0);
                lock = &accel_dev->vf.vf2pf_lock;
                local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
                local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
                remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
                remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
                int_bit = ADF_VF2PF_INT;
        } else {
                pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
                lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
                local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
                local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
                remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
                remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
                int_bit = ADF_PF2VF_INT;
        }

        mutex_lock(lock);

        /* Check if PF2VF CSR is in use by remote function */
        val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
        if ((val & remote_in_use_mask) == remote_in_use_pattern) {
                dev_dbg(&GET_DEV(accel_dev),
                        "PF2VF CSR in use by remote function\n");
                ret = -EBUSY;
                goto out;
        }

        /* Attempt to get ownership of PF2VF CSR */
        msg &= ~local_in_use_mask;
        msg |= local_in_use_pattern;
        ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);

        /* Wait in case remote func also attempting to get ownership */
        msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);

        val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
        if ((val & local_in_use_mask) != local_in_use_pattern) {
                dev_dbg(&GET_DEV(accel_dev),
                        "PF2VF CSR in use by remote - collision detected\n");
                ret = -EBUSY;
                goto out;
        }

        /*
         * This function now owns the PF2VF CSR.  The IN_USE_BY pattern must
         * remain in the PF2VF CSR for all writes including ACK from remote
         * until this local function relinquishes the CSR.  Send the message
         * by interrupting the remote.
         */
        ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);

        /* Wait for confirmation from remote func it received the message */
        do {
                msleep(ADF_IOV_MSG_ACK_DELAY);
                val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
        } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));

        if (val & int_bit) {
                dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
                val &= ~int_bit;
                ret = -EIO;
        }

        /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
        ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
out:
        mutex_unlock(lock);
        return ret;
}

/**
 * adf_iov_putmsg() - send PF2VF message
 * @accel_dev:  Pointer to acceleration device.
 * @msg:        Message to send
 * @vf_nr:      VF number to which the message will be sent
 *
 * Function sends a message from the PF to a VF. It is also used by a VF to
 * send a message to the PF, in which case @vf_nr is ignored.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
        u32 count = 0;
        int ret;

        do {
                ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
                if (ret)
                        msleep(ADF_IOV_MSG_RETRY_DELAY);
        } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));

        return ret;
}
EXPORT_SYMBOL_GPL(adf_iov_putmsg);

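/*
 * adf_vf2pf_req_hndl() - Handle a VF2PF request on the PF
 * @vf_info: Pointer to the PF's per-VF data for the signalling VF.
 *
 * Read and acknowledge the message written by the VF, handle version,
 * init and shutdown requests, send a response where one is required and
 * re-enable the VF2PF interrupt for this VF.
 */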
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
        struct adf_accel_dev *accel_dev = vf_info->accel_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        int bar_id = hw_data->get_misc_bar_id(hw_data);
        struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
        void __iomem *pmisc_addr = pmisc->virt_addr;
        u32 msg, resp = 0, vf_nr = vf_info->vf_nr;

        /* Read message from the VF */
        msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));

        /* To ACK, clear the VF2PFINT bit */
        msg &= ~ADF_VF2PF_INT;
        ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);

        if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
                /* Ignore legacy non-system (non-kernel) VF2PF messages */
                goto err;

        switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
        case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
                {
                u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;

                resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
                         (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
                          ADF_PF2VF_MSGTYPE_SHIFT) |
                         (ADF_PFVF_COMPATIBILITY_VERSION <<
                          ADF_PF2VF_VERSION_RESP_VERS_SHIFT));

                dev_dbg(&GET_DEV(accel_dev),
                        "Compatibility Version Request from VF%d vers=%u\n",
                        vf_nr + 1, vf_compat_ver);

                if (vf_compat_ver < hw_data->min_iov_compat_ver) {
                        dev_err(&GET_DEV(accel_dev),
                                "VF (vers %d) incompatible with PF (vers %d)\n",
                                vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
                        resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                } else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
                        dev_err(&GET_DEV(accel_dev),
                                "VF (vers %d) compat with PF (vers %d) unkn.\n",
                                vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
                        resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                } else {
                        dev_dbg(&GET_DEV(accel_dev),
                                "VF (vers %d) compatible with PF (vers %d)\n",
                                vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
                        resp |= ADF_PF2VF_VF_COMPATIBLE <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                }
                }
                break;
        case ADF_VF2PF_MSGTYPE_VERSION_REQ:
                dev_dbg(&GET_DEV(accel_dev),
                        "Legacy VersionRequest received from VF%d 0x%x\n",
                        vf_nr + 1, msg);
                resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
                         (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
                          ADF_PF2VF_MSGTYPE_SHIFT) |
                         (ADF_PFVF_COMPATIBILITY_VERSION <<
                          ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
                resp |= ADF_PF2VF_VF_COMPATIBLE <<
                        ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                /* Set legacy major and minor version num */
                resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
                        1 << ADF_PF2VF_MINORVERSION_SHIFT;
                break;
        case ADF_VF2PF_MSGTYPE_INIT:
                {
                dev_dbg(&GET_DEV(accel_dev),
                        "Init message received from VF%d 0x%x\n",
                        vf_nr + 1, msg);
                vf_info->init = true;
                }
                break;
        case ADF_VF2PF_MSGTYPE_SHUTDOWN:
                {
                dev_dbg(&GET_DEV(accel_dev),
                        "Shutdown message received from VF%d 0x%x\n",
                        vf_nr + 1, msg);
                vf_info->init = false;
                }
                break;
        default:
                goto err;
        }

        if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
                dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");

        /* re-enable interrupt on PF from this VF */
        adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
        return;
err:
        dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
                vf_nr + 1, msg);
}

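/*
 * adf_pf2vf_notify_restarting() - Notify VFs that the PF is restarting
 * @accel_dev: Pointer to acceleration device.
 *
 * Send a RESTARTING message to every VF that has completed init.
 */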
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
        struct adf_accel_vf_info *vf;
        u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
                (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
        int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));

        for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
                if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to send restarting msg to VF%d\n", i);
        }
}

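/*
 * adf_vf2pf_request_version() - Negotiate PFVF protocol compatibility
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Send a Compatibility Version Request to the PF, wait for the response
 * and check whether the reported PF version is compatible with this VF.
 */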
static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
        unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 msg = 0;
        int ret;

        msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
        msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
        msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
        BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);

        /* Send request from VF to PF */
        ret = adf_iov_putmsg(accel_dev, msg, 0);
        if (ret) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to send Compatibility Version Request.\n");
                return ret;
        }

        /* Wait for response */
        if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
                                         timeout)) {
                dev_err(&GET_DEV(accel_dev),
                        "IOV request/response message timeout expired\n");
                return -EIO;
        }

        /* Response from PF received, check compatibility */
        switch (accel_dev->vf.compatible) {
        case ADF_PF2VF_VF_COMPATIBLE:
                break;
        case ADF_PF2VF_VF_COMPAT_UNKNOWN:
                /* VF is newer than PF and decides whether it is compatible */
                if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
                        break;
                fallthrough;
        case ADF_PF2VF_VF_INCOMPATIBLE:
                dev_err(&GET_DEV(accel_dev),
                        "PF (vers %d) and VF (vers %d) are not compatible\n",
                        accel_dev->vf.pf_version,
                        ADF_PFVF_COMPATIBILITY_VERSION);
                return -EINVAL;
        default:
                dev_err(&GET_DEV(accel_dev),
                        "Invalid response from PF; assume not compatible\n");
                return -EINVAL;
        }
        return ret;
}

/**
 * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
        adf_enable_pf2vf_interrupts(accel_dev);
        return adf_vf2pf_request_version(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);