1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2015 - 2020 Intel Corporation */
3 #include <linux/delay.h>
4 #include "adf_accel_devices.h"
5 #include "adf_common_drv.h"
6 #include "adf_pf2vf_msg.h"
/* DH895xCC endpoint error-mask registers controlling VF2PF interrupts */
#define ADF_DH895XCC_EP_OFFSET	0x3A000
#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
/* VF2PF sources for VFs 1-16 (vf_mask[15:0]) map to ERRMSK3 bits [24:9] */
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) (((vf_mask) & 0xFFFF) << 9)
#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
/* VF2PF sources for VFs 17-32 (vf_mask[31:16]) map to ERRMSK5 bits [15:0] */
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) ((vf_mask) >> 16)
14 static void __adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
17 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
18 struct adf_bar *pmisc =
19 &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
20 void __iomem *pmisc_addr = pmisc->virt_addr;
23 /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
24 if (vf_mask & 0xFFFF) {
25 reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
26 reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
27 ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
30 /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
32 reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
33 reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
34 ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
38 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
42 spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
43 __adf_enable_vf2pf_interrupts(accel_dev, vf_mask);
44 spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
47 static void __adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
50 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
51 struct adf_bar *pmisc =
52 &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
53 void __iomem *pmisc_addr = pmisc->virt_addr;
56 /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
57 if (vf_mask & 0xFFFF) {
58 reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
59 ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
60 ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
63 /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
65 reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
66 ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
67 ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
71 void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
75 spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
76 __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
77 spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
80 void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev, u32 vf_mask)
82 spin_lock(&accel_dev->pf.vf2pf_ints_lock);
83 __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
84 spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
87 static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
89 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
90 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
91 void __iomem *pmisc_bar_addr =
92 pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
93 u32 val, pf2vf_offset, count = 0;
94 u32 local_in_use_mask, local_in_use_pattern;
95 u32 remote_in_use_mask, remote_in_use_pattern;
96 struct mutex *lock; /* lock preventing concurrent acces of CSR */
100 if (accel_dev->is_vf) {
101 pf2vf_offset = hw_data->get_pf2vf_offset(0);
102 lock = &accel_dev->vf.vf2pf_lock;
103 local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
104 local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
105 remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
106 remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
107 int_bit = ADF_VF2PF_INT;
109 pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
110 lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
111 local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
112 local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
113 remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
114 remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
115 int_bit = ADF_PF2VF_INT;
120 /* Check if PF2VF CSR is in use by remote function */
121 val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
122 if ((val & remote_in_use_mask) == remote_in_use_pattern) {
123 dev_dbg(&GET_DEV(accel_dev),
124 "PF2VF CSR in use by remote function\n");
129 /* Attempt to get ownership of PF2VF CSR */
130 msg &= ~local_in_use_mask;
131 msg |= local_in_use_pattern;
132 ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
134 /* Wait in case remote func also attempting to get ownership */
135 msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
137 val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
138 if ((val & local_in_use_mask) != local_in_use_pattern) {
139 dev_dbg(&GET_DEV(accel_dev),
140 "PF2VF CSR in use by remote - collision detected\n");
146 * This function now owns the PV2VF CSR. The IN_USE_BY pattern must
147 * remain in the PF2VF CSR for all writes including ACK from remote
148 * until this local function relinquishes the CSR. Send the message
149 * by interrupting the remote.
151 ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
153 /* Wait for confirmation from remote func it received the message */
155 msleep(ADF_IOV_MSG_ACK_DELAY);
156 val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
157 } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
160 dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
165 /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
166 ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
173 * adf_iov_putmsg() - send PF2VF message
174 * @accel_dev: Pointer to acceleration device.
175 * @msg: Message to send
176 * @vf_nr: VF number to which the message will be sent
178 * Function sends a message from the PF to a VF
180 * Return: 0 on success, error code otherwise.
182 int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
188 ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
190 msleep(ADF_IOV_MSG_RETRY_DELAY);
191 } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
196 void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
198 struct adf_accel_dev *accel_dev = vf_info->accel_dev;
199 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
200 int bar_id = hw_data->get_misc_bar_id(hw_data);
201 struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
202 void __iomem *pmisc_addr = pmisc->virt_addr;
203 u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
205 /* Read message from the VF */
206 msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
208 /* To ACK, clear the VF2PFINT bit */
209 msg &= ~ADF_VF2PF_INT;
210 ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
212 if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
213 /* Ignore legacy non-system (non-kernel) VF2PF messages */
216 switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
217 case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
219 u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
221 resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
222 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
223 ADF_PF2VF_MSGTYPE_SHIFT) |
224 (ADF_PFVF_COMPAT_THIS_VERSION <<
225 ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
227 dev_dbg(&GET_DEV(accel_dev),
228 "Compatibility Version Request from VF%d vers=%u\n",
229 vf_nr + 1, vf_compat_ver);
231 if (vf_compat_ver < hw_data->min_iov_compat_ver) {
232 dev_err(&GET_DEV(accel_dev),
233 "VF (vers %d) incompatible with PF (vers %d)\n",
234 vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
235 resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
236 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
237 } else if (vf_compat_ver > ADF_PFVF_COMPAT_THIS_VERSION) {
238 dev_err(&GET_DEV(accel_dev),
239 "VF (vers %d) compat with PF (vers %d) unkn.\n",
240 vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
241 resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
242 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
244 dev_dbg(&GET_DEV(accel_dev),
245 "VF (vers %d) compatible with PF (vers %d)\n",
246 vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
247 resp |= ADF_PF2VF_VF_COMPATIBLE <<
248 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
252 case ADF_VF2PF_MSGTYPE_VERSION_REQ:
253 dev_dbg(&GET_DEV(accel_dev),
254 "Legacy VersionRequest received from VF%d 0x%x\n",
256 resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
257 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
258 ADF_PF2VF_MSGTYPE_SHIFT) |
259 (ADF_PFVF_COMPAT_THIS_VERSION <<
260 ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
261 resp |= ADF_PF2VF_VF_COMPATIBLE <<
262 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
263 /* Set legacy major and minor version num */
264 resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
265 1 << ADF_PF2VF_MINORVERSION_SHIFT;
267 case ADF_VF2PF_MSGTYPE_INIT:
269 dev_dbg(&GET_DEV(accel_dev),
270 "Init message received from VF%d 0x%x\n",
272 vf_info->init = true;
275 case ADF_VF2PF_MSGTYPE_SHUTDOWN:
277 dev_dbg(&GET_DEV(accel_dev),
278 "Shutdown message received from VF%d 0x%x\n",
280 vf_info->init = false;
287 if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
288 dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
290 /* re-enable interrupt on PF from this VF */
291 adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
295 dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
299 void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
301 struct adf_accel_vf_info *vf;
302 u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
303 (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
304 int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
306 for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
307 if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
308 dev_err(&GET_DEV(accel_dev),
309 "Failed to send restarting msg to VF%d\n", i);
313 static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
315 unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
316 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
320 msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
321 msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
322 msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
323 BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
325 reinit_completion(&accel_dev->vf.iov_msg_completion);
327 /* Send request from VF to PF */
328 ret = adf_iov_putmsg(accel_dev, msg, 0);
330 dev_err(&GET_DEV(accel_dev),
331 "Failed to send Compatibility Version Request.\n");
335 /* Wait for response */
336 if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
338 dev_err(&GET_DEV(accel_dev),
339 "IOV request/response message timeout expired\n");
343 /* Response from PF received, check compatibility */
344 switch (accel_dev->vf.compatible) {
345 case ADF_PF2VF_VF_COMPATIBLE:
347 case ADF_PF2VF_VF_COMPAT_UNKNOWN:
348 /* VF is newer than PF and decides whether it is compatible */
349 if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) {
350 accel_dev->vf.compatible = ADF_PF2VF_VF_COMPATIBLE;
354 case ADF_PF2VF_VF_INCOMPATIBLE:
355 dev_err(&GET_DEV(accel_dev),
356 "PF (vers %d) and VF (vers %d) are not compatible\n",
357 accel_dev->vf.pf_version,
358 ADF_PFVF_COMPAT_THIS_VERSION);
361 dev_err(&GET_DEV(accel_dev),
362 "Invalid response from PF; assume not compatible\n");
/**
 * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Enables PF2VF interrupts on the VF, then runs the compatibility
 * version handshake with the PF.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	adf_enable_pf2vf_interrupts(accel_dev);
	return adf_vf2pf_request_version(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);