// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 */

#include <linux/virtio_pci_admin.h>
#include "virtio_pci_common.h"
10 * virtio_pci_admin_has_legacy_io - Checks whether the legacy IO
11 * commands are supported
14 * Returns true on success.
16 bool virtio_pci_admin_has_legacy_io(struct pci_dev *pdev)
18 struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
19 struct virtio_pci_device *vp_dev;
24 if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ))
27 vp_dev = to_vp_device(virtio_dev);
29 if ((vp_dev->admin_vq.supported_cmds & VIRTIO_LEGACY_ADMIN_CMD_BITMAP) ==
30 VIRTIO_LEGACY_ADMIN_CMD_BITMAP)
34 EXPORT_SYMBOL_GPL(virtio_pci_admin_has_legacy_io);
36 static int virtio_pci_admin_legacy_io_write(struct pci_dev *pdev, u16 opcode,
37 u8 offset, u8 size, u8 *buf)
39 struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
40 struct virtio_admin_cmd_legacy_wr_data *data;
41 struct virtio_admin_cmd cmd = {};
42 struct scatterlist data_sg;
49 vf_id = pci_iov_vf_id(pdev);
53 data = kzalloc(sizeof(*data) + size, GFP_KERNEL);
57 data->offset = offset;
58 memcpy(data->registers, buf, size);
59 sg_init_one(&data_sg, data, sizeof(*data) + size);
60 cmd.opcode = cpu_to_le16(opcode);
61 cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
62 cmd.group_member_id = cpu_to_le64(vf_id + 1);
63 cmd.data_sg = &data_sg;
64 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
71 * virtio_pci_admin_legacy_io_write_common - Write legacy common configuration
74 * @offset: starting byte offset within the common configuration area to write to
75 * @size: size of the data to write
76 * @buf: buffer which holds the data
78 * Note: caller must serialize access for the given device.
79 * Returns 0 on success, or negative on failure.
81 int virtio_pci_admin_legacy_common_io_write(struct pci_dev *pdev, u8 offset,
84 return virtio_pci_admin_legacy_io_write(pdev,
85 VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE,
88 EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_common_io_write);
91 * virtio_pci_admin_legacy_io_write_device - Write legacy device configuration
94 * @offset: starting byte offset within the device configuration area to write to
95 * @size: size of the data to write
96 * @buf: buffer which holds the data
98 * Note: caller must serialize access for the given device.
99 * Returns 0 on success, or negative on failure.
101 int virtio_pci_admin_legacy_device_io_write(struct pci_dev *pdev, u8 offset,
104 return virtio_pci_admin_legacy_io_write(pdev,
105 VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE,
108 EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_device_io_write);
110 static int virtio_pci_admin_legacy_io_read(struct pci_dev *pdev, u16 opcode,
111 u8 offset, u8 size, u8 *buf)
113 struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
114 struct virtio_admin_cmd_legacy_rd_data *data;
115 struct scatterlist data_sg, result_sg;
116 struct virtio_admin_cmd cmd = {};
123 vf_id = pci_iov_vf_id(pdev);
127 data = kzalloc(sizeof(*data), GFP_KERNEL);
131 data->offset = offset;
132 sg_init_one(&data_sg, data, sizeof(*data));
133 sg_init_one(&result_sg, buf, size);
134 cmd.opcode = cpu_to_le16(opcode);
135 cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
136 cmd.group_member_id = cpu_to_le64(vf_id + 1);
137 cmd.data_sg = &data_sg;
138 cmd.result_sg = &result_sg;
139 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
146 * virtio_pci_admin_legacy_device_io_read - Read legacy device configuration of
149 * @offset: starting byte offset within the device configuration area to read from
150 * @size: size of the data to be read
151 * @buf: buffer to hold the returned data
153 * Note: caller must serialize access for the given device.
154 * Returns 0 on success, or negative on failure.
156 int virtio_pci_admin_legacy_device_io_read(struct pci_dev *pdev, u8 offset,
159 return virtio_pci_admin_legacy_io_read(pdev,
160 VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ,
163 EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_device_io_read);
166 * virtio_pci_admin_legacy_common_io_read - Read legacy common configuration of
169 * @offset: starting byte offset within the common configuration area to read from
170 * @size: size of the data to be read
171 * @buf: buffer to hold the returned data
173 * Note: caller must serialize access for the given device.
174 * Returns 0 on success, or negative on failure.
176 int virtio_pci_admin_legacy_common_io_read(struct pci_dev *pdev, u8 offset,
179 return virtio_pci_admin_legacy_io_read(pdev,
180 VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ,
183 EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_common_io_read);
186 * virtio_pci_admin_legacy_io_notify_info - Read the queue notification
187 * information for legacy interface
189 * @req_bar_flags: requested bar flags
190 * @bar: on output the BAR number of the owner or member device
191 * @bar_offset: on output the offset within bar
193 * Returns 0 on success, or negative on failure.
195 int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev,
196 u8 req_bar_flags, u8 *bar,
199 struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
200 struct virtio_admin_cmd_notify_info_result *result;
201 struct virtio_admin_cmd cmd = {};
202 struct scatterlist result_sg;
209 vf_id = pci_iov_vf_id(pdev);
213 result = kzalloc(sizeof(*result), GFP_KERNEL);
217 sg_init_one(&result_sg, result, sizeof(*result));
218 cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO);
219 cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
220 cmd.group_member_id = cpu_to_le64(vf_id + 1);
221 cmd.result_sg = &result_sg;
222 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
224 struct virtio_admin_cmd_notify_info_data *entry;
228 for (i = 0; i < VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO; i++) {
229 entry = &result->entries[i];
230 if (entry->flags == VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END)
232 if (entry->flags != req_bar_flags)
235 *bar_offset = le64_to_cpu(entry->offset);
244 EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_io_notify_info);