/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2015 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2015 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
48 #include <linux/pci.h>
49 #include <linux/mutex.h>
50 #include <linux/delay.h>
51 #include "adf_accel_devices.h"
52 #include "adf_common_drv.h"
53 #include "adf_pf2vf_msg.h"
/* DH895xCC endpoint CSR offsets for the VF2PF interrupt mask registers. */
#define ADF_DH895XCC_EP_OFFSET	0x3A000
#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)	/* ERRMSK3: masks VF2PF ints for VFs 1-16 */
/* Bits [24:9] of ERRMSK3 mask VF2PF interrupts from VFs 1-16 (vf_mask[15:0]).
 * Macro arguments are fully parenthesized so expression arguments
 * (e.g. a ternary or bitwise-or) expand with the intended precedence.
 */
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) (((vf_mask) & 0xFFFF) << 9)
#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)	/* ERRMSK5: masks VF2PF ints for VFs 17-32 */
/* Low bits of ERRMSK5 mask VF2PF interrupts from VFs 17-32 (vf_mask[31:16]). */
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) ((vf_mask) >> 16)
62 * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
63 * @accel_dev: Pointer to acceleration device.
65 * Function enables PF to VF interrupts
67 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
69 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
70 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
71 void __iomem *pmisc_bar_addr =
72 pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
74 ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
76 EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts);
79 * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
80 * @accel_dev: Pointer to acceleration device.
82 * Function disables PF to VF interrupts
84 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
86 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
87 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
88 void __iomem *pmisc_bar_addr =
89 pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
91 ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
93 EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
95 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
98 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
99 struct adf_bar *pmisc =
100 &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
101 void __iomem *pmisc_addr = pmisc->virt_addr;
104 /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
105 if (vf_mask & 0xFFFF) {
106 reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
107 reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
108 ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
111 /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
113 reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
114 reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
115 ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
120 * adf_disable_pf2vf_interrupts() - Disable VF to PF interrupts
121 * @accel_dev: Pointer to acceleration device.
123 * Function disables VF to PF interrupts
125 void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
127 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
128 struct adf_bar *pmisc =
129 &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
130 void __iomem *pmisc_addr = pmisc->virt_addr;
133 /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
134 if (vf_mask & 0xFFFF) {
135 reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
136 ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
137 ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
140 /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
142 reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
143 ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
144 ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
147 EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts);
149 static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
151 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
152 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
153 void __iomem *pmisc_bar_addr =
154 pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
155 u32 val, pf2vf_offset, count = 0;
156 u32 local_in_use_mask, local_in_use_pattern;
157 u32 remote_in_use_mask, remote_in_use_pattern;
158 struct mutex *lock; /* lock preventing concurrent acces of CSR */
162 if (accel_dev->is_vf) {
163 pf2vf_offset = hw_data->get_pf2vf_offset(0);
164 lock = &accel_dev->vf.vf2pf_lock;
165 local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
166 local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
167 remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
168 remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
169 int_bit = ADF_VF2PF_INT;
171 pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
172 lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
173 local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
174 local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
175 remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
176 remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
177 int_bit = ADF_PF2VF_INT;
182 /* Check if PF2VF CSR is in use by remote function */
183 val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
184 if ((val & remote_in_use_mask) == remote_in_use_pattern) {
185 dev_dbg(&GET_DEV(accel_dev),
186 "PF2VF CSR in use by remote function\n");
191 /* Attempt to get ownership of PF2VF CSR */
192 msg &= ~local_in_use_mask;
193 msg |= local_in_use_pattern;
194 ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
196 /* Wait in case remote func also attempting to get ownership */
197 msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
199 val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
200 if ((val & local_in_use_mask) != local_in_use_pattern) {
201 dev_dbg(&GET_DEV(accel_dev),
202 "PF2VF CSR in use by remote - collision detected\n");
208 * This function now owns the PV2VF CSR. The IN_USE_BY pattern must
209 * remain in the PF2VF CSR for all writes including ACK from remote
210 * until this local function relinquishes the CSR. Send the message
211 * by interrupting the remote.
213 ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
215 /* Wait for confirmation from remote func it received the message */
217 msleep(ADF_IOV_MSG_ACK_DELAY);
218 val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
219 } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
222 dev_dbg(&GET_DEV(accel_dev),
223 "Collision - PFVF CSR overwritten by remote function\n");
229 dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
234 /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
235 ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
242 * adf_iov_putmsg() - send PF2VF message
243 * @accel_dev: Pointer to acceleration device.
244 * @msg: Message to send
245 * @vf_nr: VF number to which the message will be sent
247 * Function sends a messge from the PF to a VF
249 * Return: 0 on success, error code otherwise.
251 int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
257 ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
259 msleep(ADF_IOV_MSG_RETRY_DELAY);
260 } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
264 EXPORT_SYMBOL_GPL(adf_iov_putmsg);
266 void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
268 struct adf_accel_dev *accel_dev = vf_info->accel_dev;
269 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
270 int bar_id = hw_data->get_misc_bar_id(hw_data);
271 struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
272 void __iomem *pmisc_addr = pmisc->virt_addr;
273 u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
275 /* Read message from the VF */
276 msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
278 /* To ACK, clear the VF2PFINT bit */
279 msg &= ~ADF_VF2PF_INT;
280 ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
282 if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
283 /* Ignore legacy non-system (non-kernel) VF2PF messages */
286 switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
287 case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
289 u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
291 resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
292 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
293 ADF_PF2VF_MSGTYPE_SHIFT) |
294 (ADF_PFVF_COMPATIBILITY_VERSION <<
295 ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
297 dev_dbg(&GET_DEV(accel_dev),
298 "Compatibility Version Request from VF%d vers=%u\n",
299 vf_nr + 1, vf_compat_ver);
301 if (vf_compat_ver < hw_data->min_iov_compat_ver) {
302 dev_err(&GET_DEV(accel_dev),
303 "VF (vers %d) incompatible with PF (vers %d)\n",
304 vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
305 resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
306 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
307 } else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
308 dev_err(&GET_DEV(accel_dev),
309 "VF (vers %d) compat with PF (vers %d) unkn.\n",
310 vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
311 resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
312 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
314 dev_dbg(&GET_DEV(accel_dev),
315 "VF (vers %d) compatible with PF (vers %d)\n",
316 vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
317 resp |= ADF_PF2VF_VF_COMPATIBLE <<
318 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
322 case ADF_VF2PF_MSGTYPE_VERSION_REQ:
323 dev_dbg(&GET_DEV(accel_dev),
324 "Legacy VersionRequest received from VF%d 0x%x\n",
326 resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
327 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
328 ADF_PF2VF_MSGTYPE_SHIFT) |
329 (ADF_PFVF_COMPATIBILITY_VERSION <<
330 ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
331 resp |= ADF_PF2VF_VF_COMPATIBLE <<
332 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
333 /* Set legacy major and minor version num */
334 resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
335 1 << ADF_PF2VF_MINORVERSION_SHIFT;
337 case ADF_VF2PF_MSGTYPE_INIT:
339 dev_dbg(&GET_DEV(accel_dev),
340 "Init message received from VF%d 0x%x\n",
342 vf_info->init = true;
345 case ADF_VF2PF_MSGTYPE_SHUTDOWN:
347 dev_dbg(&GET_DEV(accel_dev),
348 "Shutdown message received from VF%d 0x%x\n",
350 vf_info->init = false;
357 if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
358 dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
360 /* re-enable interrupt on PF from this VF */
361 adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
364 dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
368 void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
370 struct adf_accel_vf_info *vf;
371 u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
372 (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
373 int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
375 for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
376 if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
377 dev_err(&GET_DEV(accel_dev),
378 "Failed to send restarting msg to VF%d\n", i);
382 static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
384 unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
385 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
389 msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
390 msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
391 msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
392 BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
394 reinit_completion(&accel_dev->vf.iov_msg_completion);
396 /* Send request from VF to PF */
397 ret = adf_iov_putmsg(accel_dev, msg, 0);
399 dev_err(&GET_DEV(accel_dev),
400 "Failed to send Compatibility Version Request.\n");
404 /* Wait for response */
405 if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
407 dev_err(&GET_DEV(accel_dev),
408 "IOV request/response message timeout expired\n");
412 /* Response from PF received, check compatibility */
413 switch (accel_dev->vf.compatible) {
414 case ADF_PF2VF_VF_COMPATIBLE:
416 case ADF_PF2VF_VF_COMPAT_UNKNOWN:
417 /* VF is newer than PF and decides whether it is compatible */
418 if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
421 case ADF_PF2VF_VF_INCOMPATIBLE:
422 dev_err(&GET_DEV(accel_dev),
423 "PF (vers %d) and VF (vers %d) are not compatible\n",
424 accel_dev->vf.pf_version,
425 ADF_PFVF_COMPATIBILITY_VERSION);
428 dev_err(&GET_DEV(accel_dev),
429 "Invalid response from PF; assume not compatible\n");
/**
 * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Unmasks the PF2VF interrupt so responses can be received, then runs
 * the compatibility-version handshake with the PF.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	adf_enable_pf2vf_interrupts(accel_dev);
	return adf_vf2pf_request_version(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);