crypto: qat - move pf2vf interrupt [en|dis]able to adf_vf_isr.c
linux-2.6-microblaze.git: drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"

#define ADF_DH895XCC_EP_OFFSET	0x3A000
#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) (((vf_mask) & 0xFFFF) << 9)
#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) ((vf_mask) >> 16)

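/*
 * Bit layout implied by the masks above: vf_mask[15:0] (VFs 1 through 16)
 * maps to ERRMSK3 bits 24:9, and vf_mask[31:16] (VFs 17 through 32) maps to
 * ERRMSK5 bits 15:0.  Clearing a mask bit unmasks the corresponding VF2PF
 * interrupt source; setting it masks the source.
 */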
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
				 u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 reg;

	/* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
		reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
	}

	/* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
		reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
	}
}

void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 reg;

	/* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
			ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
	}

	/* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
			ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
	}
}

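/*
 * Low-level helper implementing the shared PF2VF CSR handshake: check that
 * the remote function does not already own the CSR, claim it by writing the
 * local IN_USE pattern, wait and re-read to detect a write collision, raise
 * the interrupt bit towards the remote function, poll for the ACK (the remote
 * clears the interrupt bit), and finally release the CSR while leaving the
 * message contents in place.
 */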
static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_bar_addr =
		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
	u32 val, pf2vf_offset, count = 0;
	u32 local_in_use_mask, local_in_use_pattern;
	u32 remote_in_use_mask, remote_in_use_pattern;
	struct mutex *lock;	/* lock preventing concurrent access of CSR */
	u32 int_bit;
	int ret = 0;

	if (accel_dev->is_vf) {
		pf2vf_offset = hw_data->get_pf2vf_offset(0);
		lock = &accel_dev->vf.vf2pf_lock;
		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		int_bit = ADF_VF2PF_INT;
	} else {
		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
		lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		int_bit = ADF_PF2VF_INT;
	}

	mutex_lock(lock);

	/* Check if PF2VF CSR is in use by remote function */
	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote function\n");
		ret = -EBUSY;
		goto out;
	}

	/* Attempt to get ownership of PF2VF CSR */
	msg &= ~local_in_use_mask;
	msg |= local_in_use_pattern;
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);

	/* Wait in case remote func is also attempting to get ownership */
	msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);

	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & local_in_use_mask) != local_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote - collision detected\n");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * This function now owns the PF2VF CSR.  The IN_USE_BY pattern must
	 * remain in the PF2VF CSR for all writes including ACK from remote
	 * until this local function relinquishes the CSR.  Send the message
	 * by interrupting the remote.
	 */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);

	/* Wait for confirmation from remote func that it received the message */
	do {
		msleep(ADF_IOV_MSG_ACK_DELAY);
		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));

	if (val & int_bit) {
		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
		val &= ~int_bit;
		ret = -EIO;
	}

	/* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
out:
	mutex_unlock(lock);
	return ret;
}

/**
 * adf_iov_putmsg() - send PF2VF or VF2PF message
 * @accel_dev:  Pointer to acceleration device.
 * @msg:        Message to send
 * @vf_nr:      VF number to which the message will be sent (not used when
 *              sending from a VF)
 *
 * Function sends a message over the PF2VF channel, either from the PF to the
 * given VF or, when running on a VF device, from that VF to the PF.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	u32 count = 0;
	int ret;

	do {
		ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
		if (ret)
			msleep(ADF_IOV_MSG_RETRY_DELAY);
	} while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));

	return ret;
}
EXPORT_SYMBOL_GPL(adf_iov_putmsg);
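
/*
 * Usage sketch (illustrative only, not part of the driver): a PF-side caller
 * builds the message in the PF2VF layout and passes the target VF index,
 * mirroring adf_pf2vf_notify_restarting() below:
 *
 *	u32 msg = ADF_PF2VF_MSGORIGIN_SYSTEM |
 *		  (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT);
 *
 *	if (adf_iov_putmsg(accel_dev, msg, vf_nr))
 *		dev_err(&GET_DEV(accel_dev), "Failed to notify VF\n");
 *
 * A VF-side caller uses the VF2PF layout and vf_nr 0 instead, as in
 * adf_vf2pf_request_version() below.
 */
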
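/*
 * PF-side handler for a VF2PF request: read the message from the VF's PF2VF
 * CSR, ACK it by clearing the VF2PF interrupt bit, dispatch on the message
 * type (compatibility/version requests, init, shutdown), send a response via
 * adf_iov_putmsg() where one is required, and re-enable the VF2PF interrupt
 * source for this VF.
 */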
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int bar_id = hw_data->get_misc_bar_id(hw_data);
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;

	/* Read message from the VF */
	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));

	/* To ACK, clear the VF2PFINT bit */
	msg &= ~ADF_VF2PF_INT;
	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);

	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
		/* Ignore legacy non-system (non-kernel) VF2PF messages */
		goto err;

	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
		{
		u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;

		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			  ADF_PF2VF_MSGTYPE_SHIFT) |
			 (ADF_PFVF_COMPAT_THIS_VERSION <<
			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));

		dev_dbg(&GET_DEV(accel_dev),
			"Compatibility Version Request from VF%d vers=%u\n",
			vf_nr + 1, vf_compat_ver);

		if (vf_compat_ver < hw_data->min_iov_compat_ver) {
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) incompatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
			resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else if (vf_compat_ver > ADF_PFVF_COMPAT_THIS_VERSION) {
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) compat with PF (vers %d) unkn.\n",
				vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
			resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else {
			dev_dbg(&GET_DEV(accel_dev),
				"VF (vers %d) compatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
			resp |= ADF_PF2VF_VF_COMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		}
		}
		break;
	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
		dev_dbg(&GET_DEV(accel_dev),
			"Legacy VersionRequest received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			  ADF_PF2VF_MSGTYPE_SHIFT) |
			 (ADF_PFVF_COMPAT_THIS_VERSION <<
			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
		resp |= ADF_PF2VF_VF_COMPATIBLE <<
			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		/* Set legacy major and minor version num */
		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
			1 << ADF_PF2VF_MINORVERSION_SHIFT;
		break;
	case ADF_VF2PF_MSGTYPE_INIT:
		dev_dbg(&GET_DEV(accel_dev),
			"Init message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = true;
		break;
	case ADF_VF2PF_MSGTYPE_SHUTDOWN:
		dev_dbg(&GET_DEV(accel_dev),
			"Shutdown message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = false;
		break;
	default:
		goto err;
	}

	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
		dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");

	/* re-enable interrupt on PF from this VF */
	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
	return;
err:
	dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x)\n",
		vf_nr + 1, msg);
}

void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_vf_info *vf;
	u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
		(ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));

	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
		if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
			dev_err(&GET_DEV(accel_dev),
				"Failed to send restarting msg to VF%d\n", i);
	}
}

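/*
 * Send a Compatibility Version Request to the PF and wait (up to
 * ADF_IOV_MSG_RESP_TIMEOUT ms) for the response, which is delivered through
 * accel_dev->vf.iov_msg_completion, then check the reported compatibility.
 */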
static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msg = 0;
	int ret;

	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
	msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
	BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);

	reinit_completion(&accel_dev->vf.iov_msg_completion);

	/* Send request from VF to PF */
	ret = adf_iov_putmsg(accel_dev, msg, 0);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to send Compatibility Version Request.\n");
		return ret;
	}

	/* Wait for response */
	if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
					 timeout)) {
		dev_err(&GET_DEV(accel_dev),
			"IOV request/response message timeout expired\n");
		return -EIO;
	}

	/* Response from PF received, check compatibility */
	switch (accel_dev->vf.compatible) {
	case ADF_PF2VF_VF_COMPATIBLE:
		break;
	case ADF_PF2VF_VF_COMPAT_UNKNOWN:
		/* VF is newer than PF and decides whether it is compatible */
		if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
			break;
		fallthrough;
	case ADF_PF2VF_VF_INCOMPATIBLE:
		dev_err(&GET_DEV(accel_dev),
			"PF (vers %d) and VF (vers %d) are not compatible\n",
			accel_dev->vf.pf_version,
			ADF_PFVF_COMPAT_THIS_VERSION);
		return -EINVAL;
	default:
		dev_err(&GET_DEV(accel_dev),
			"Invalid response from PF; assume not compatible\n");
		return -EINVAL;
	}
	return ret;
}

/**
 * adf_enable_vf2pf_comms() - Enable communication from VF to PF
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	adf_enable_pf2vf_interrupts(accel_dev);
	return adf_vf2pf_request_version(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
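
/*
 * Note: the VF-side startup sequence implemented here is (1) enable the
 * PF2VF interrupt so the VF can receive the reply, then (2) send the
 * Compatibility Version Request and block on iov_msg_completion until the
 * PF's VERSION_RESP arrives; the PF side of the exchange is handled by
 * adf_vf2pf_req_hndl() above.  How this function gets hooked into device
 * initialization lives outside this file (typically via the device's
 * hw_device_data ops); that wiring is an assumption, not shown here.
 */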