1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2023 Advanced Micro Devices, Inc. */
4 #include <linux/anon_inodes.h>
5 #include <linux/file.h>
7 #include <linux/highmem.h>
8 #include <linux/vfio.h>
9 #include <linux/vfio_pci_core.h>
/*
 * pds_vfio_get_lm_file() - build a live-migration (LM) file of @size bytes.
 * @fops:  file_operations to back the anon inode (save or restore fops)
 * @flags: open flags for the anon inode (O_RDONLY for save, O_WRONLY for
 *         restore, per the callers below)
 * @size:  requested device state size in bytes; backing memory is rounded
 *         up to whole pages
 *
 * Allocates the lm_file bookkeeping struct, an anonymous inode file,
 * and page-aligned backing memory, then records every backing page in a
 * scatterlist so the buffer can be DMA-mapped later.
 *
 * NOTE(review): several interior lines (local declarations, some error
 * checks, the success return and unwind labels) are elided in this view;
 * the visible goto targets imply the usual kernel unwind-on-error layout.
 */
14 static struct pds_vfio_lm_file *
15 pds_vfio_get_lm_file(const struct file_operations *fops, int flags, u64 size)
17 struct pds_vfio_lm_file *lm_file = NULL;
18 unsigned long long npages;
26 /* Alloc file structure */
27 lm_file = kzalloc(sizeof(*lm_file), GFP_KERNEL);
33 anon_inode_getfile("pds_vfio_lm", fops, lm_file, flags);
34 if (IS_ERR(lm_file->filep))
/* Mark the file as stream-like: no seeking, sequential access only. */
37 stream_open(lm_file->filep->f_inode, lm_file->filep);
38 mutex_init(&lm_file->lock);
40 /* prevent file from being released before we are done with it */
41 get_file(lm_file->filep);
43 /* Allocate memory for file pages */
44 npages = DIV_ROUND_UP_ULL(size, PAGE_SIZE);
45 pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
/* kvzalloc may return either kmalloc or vmalloc memory (see below). */
49 page_mem = kvzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
51 goto out_free_pages_array;
/* Walk the buffer a page at a time, resolving each struct page. */
53 p = page_mem - offset_in_page(page_mem);
54 for (unsigned long long i = 0; i < npages; i++) {
/* vmalloc and kmalloc memory need different virt->page lookups. */
55 if (is_vmalloc_addr(p))
56 pages[i] = vmalloc_to_page(p);
58 pages[i] = kmap_to_page((void *)p);
60 goto out_free_page_mem;
65 /* Create scatterlist of file pages to use for DMA mapping later */
66 if (sg_alloc_table_from_pages(&lm_file->sg_table, pages, npages, 0,
68 goto out_free_page_mem;
71 lm_file->pages = pages;
72 lm_file->npages = npages;
73 lm_file->page_mem = page_mem;
/* alloc_size is the page-rounded allocation, not the requested size. */
74 lm_file->alloc_size = npages * PAGE_SIZE;
/* Error unwind (labels elided in this view). */
84 mutex_destroy(&lm_file->lock);
/*
 * pds_vfio_put_lm_file() - release all resources held by an LM file.
 * @lm_file: live-migration file created by pds_vfio_get_lm_file()
 *
 * Under the lm_file lock: frees the DMA scatterlist, the backing memory,
 * and the pages array, NULLing the pointers to guard against reuse.
 * Finally drops the file reference taken at creation so the anon inode
 * can be released.
 */
91 static void pds_vfio_put_lm_file(struct pds_vfio_lm_file *lm_file)
93 mutex_lock(&lm_file->lock);
96 lm_file->alloc_size = 0;
98 /* Free scatter list of file pages */
99 sg_free_table(&lm_file->sg_table);
101 kvfree(lm_file->page_mem);
102 lm_file->page_mem = NULL;
103 kfree(lm_file->pages);
104 lm_file->pages = NULL;
106 mutex_unlock(&lm_file->lock);
108 /* allow file to be released since we are done with it */
109 fput(lm_file->filep);
/*
 * pds_vfio_put_save_file() - release the save-direction LM file, if any.
 * @pds_vfio: per-VF device state
 *
 * Safe to call when no save file exists (early return, elided here).
 */
112 void pds_vfio_put_save_file(struct pds_vfio_pci_device *pds_vfio)
114 if (!pds_vfio->save_file)
117 pds_vfio_put_lm_file(pds_vfio->save_file);
118 pds_vfio->save_file = NULL;
/*
 * pds_vfio_put_restore_file() - release the restore-direction LM file, if any.
 * @pds_vfio: per-VF device state
 *
 * Safe to call when no restore file exists (early return, elided here).
 */
121 void pds_vfio_put_restore_file(struct pds_vfio_pci_device *pds_vfio)
123 if (!pds_vfio->restore_file)
126 pds_vfio_put_lm_file(pds_vfio->restore_file);
127 pds_vfio->restore_file = NULL;
/*
 * pds_vfio_get_file_page() - map a byte offset to its backing struct page.
 * @lm_file: live-migration file being read/written
 * @offset:  byte offset into the file (callers pass a page-aligned value)
 *
 * Walks the file's scatterlist to find the entry covering @offset and
 * returns the page within it.  Because read/write accesses are sequential
 * (stream_open), the last matching sg entry and its starting offset are
 * cached in lm_file so the next lookup resumes from there instead of
 * rescanning from the head; the cache is reset whenever @offset moves
 * backwards.  The not-found return path is elided in this view.
 */
130 static struct page *pds_vfio_get_file_page(struct pds_vfio_lm_file *lm_file,
131 unsigned long offset)
133 unsigned long cur_offset = 0;
134 struct scatterlist *sg;
137 /* All accesses are sequential */
138 if (offset < lm_file->last_offset || !lm_file->last_offset_sg) {
139 lm_file->last_offset = 0;
140 lm_file->last_offset_sg = lm_file->sg_table.sgl;
141 lm_file->sg_last_entry = 0;
144 cur_offset = lm_file->last_offset;
/* Resume scanning from the cached sg entry. */
146 for_each_sg(lm_file->last_offset_sg, sg,
147 lm_file->sg_table.orig_nents - lm_file->sg_last_entry, i) {
148 if (offset < sg->length + cur_offset) {
/* Cache the hit so the next sequential lookup is O(1). */
149 lm_file->last_offset_sg = sg;
150 lm_file->sg_last_entry += i;
151 lm_file->last_offset = cur_offset;
152 return nth_page(sg_page(sg),
153 (offset - cur_offset) / PAGE_SIZE);
155 cur_offset += sg->length;
/*
 * pds_vfio_release_file() - ->release hook shared by save and restore fops.
 * @inode: inode of the anon LM file
 * @filp:  file being released; private_data is the lm_file
 *
 * Resets the file position under the lock, then destroys the lock; the
 * elided lines presumably drop the lm_file itself — TODO confirm against
 * the full source.
 */
161 static int pds_vfio_release_file(struct inode *inode, struct file *filp)
163 struct pds_vfio_lm_file *lm_file = filp->private_data;
165 mutex_lock(&lm_file->lock);
166 lm_file->filep->f_pos = 0;
168 mutex_unlock(&lm_file->lock);
169 mutex_destroy(&lm_file->lock);
/*
 * pds_vfio_save_read() - ->read for the save file: copy device state to user.
 * @filp: save file; private_data is the lm_file
 * @buf:  userspace destination buffer
 * @len:  number of bytes requested
 * @pos:  current file position
 *
 * Under the lm_file lock, clamps @len to the remaining state, then copies
 * out one backing page at a time: find the page for the aligned position,
 * kmap it, copy_to_user the in-page span.  The surrounding loop and
 * position/return bookkeeping are elided in this view.
 */
175 static ssize_t pds_vfio_save_read(struct file *filp, char __user *buf,
176 size_t len, loff_t *pos)
178 struct pds_vfio_lm_file *lm_file = filp->private_data;
185 mutex_lock(&lm_file->lock);
/* Reads past the end of the saved state are invalid. */
186 if (*pos > lm_file->size) {
191 len = min_t(size_t, lm_file->size - *pos, len);
/* Split the copy at page boundaries; lookup uses the aligned offset. */
199 page_offset = (*pos) % PAGE_SIZE;
200 page = pds_vfio_get_file_page(lm_file, *pos - page_offset);
207 page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
208 from_buff = kmap_local_page(page);
209 err = copy_to_user(buf, from_buff + page_offset, page_len);
210 kunmap_local(from_buff);
222 mutex_unlock(&lm_file->lock);
/* fops for the read-only save file handed to userspace in STOP_COPY. */
226 static const struct file_operations pds_vfio_save_fops = {
227 .owner = THIS_MODULE,
228 .read = pds_vfio_save_read,
229 .release = pds_vfio_release_file,
/*
 * pds_vfio_get_save_file() - create the save-direction LM file.
 * @pds_vfio: per-VF device state
 *
 * Queries the device for the current LM state size, validates it
 * (check elided in this view), allocates a read-only LM file of that
 * size, and stashes it in pds_vfio->save_file.  Returns 0 on success
 * or a negative errno (returns elided here).
 */
233 static int pds_vfio_get_save_file(struct pds_vfio_pci_device *pds_vfio)
235 struct device *dev = &pds_vfio->vfio_coredev.pdev->dev;
236 struct pds_vfio_lm_file *lm_file;
240 /* Get live migration state size in this state */
241 err = pds_vfio_get_lm_state_size_cmd(pds_vfio, &size);
243 dev_err(dev, "failed to get save status: %pe\n", ERR_PTR(err));
247 dev_dbg(dev, "save status, size = %lld\n", size);
250 dev_err(dev, "invalid state size\n");
254 lm_file = pds_vfio_get_lm_file(&pds_vfio_save_fops, O_RDONLY, size);
256 dev_err(dev, "failed to create save file\n");
260 dev_dbg(dev, "size = %lld, alloc_size = %lld, npages = %lld\n",
261 lm_file->size, lm_file->alloc_size, lm_file->npages);
263 pds_vfio->save_file = lm_file;
/*
 * pds_vfio_restore_write() - ->write for the restore file: accept state
 * from userspace during RESUMING.
 * @filp: restore file; private_data is the lm_file
 * @buf:  userspace source buffer
 * @len:  number of bytes to write
 * @pos:  current file position
 *
 * Rejects writes whose end position overflows loff_t, then, under the
 * lm_file lock, copies user data into the backing pages one page-span at
 * a time, growing lm_file->size as data lands.  The surrounding loop and
 * bounds checks against alloc_size are elided in this view.
 */
268 static ssize_t pds_vfio_restore_write(struct file *filp, const char __user *buf,
269 size_t len, loff_t *pos)
271 struct pds_vfio_lm_file *lm_file = filp->private_data;
272 loff_t requested_length;
/* Overflow-safe computation of the write's end position. */
281 check_add_overflow((loff_t)len, *pos, &requested_length))
284 mutex_lock(&lm_file->lock);
/* Split the copy at page boundaries; lookup uses the aligned offset. */
293 page_offset = (*pos) % PAGE_SIZE;
294 page = pds_vfio_get_file_page(lm_file, *pos - page_offset);
301 page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
302 to_buff = kmap_local_page(page);
303 err = copy_from_user(to_buff + page_offset, buf, page_len);
304 kunmap_local(to_buff);
/* Track how much state has actually been received so far. */
313 lm_file->size += page_len;
316 mutex_unlock(&lm_file->lock);
/* fops for the write-only restore file handed to userspace in RESUMING. */
320 static const struct file_operations pds_vfio_restore_fops = {
321 .owner = THIS_MODULE,
322 .write = pds_vfio_restore_write,
323 .release = pds_vfio_release_file,
/*
 * pds_vfio_get_restore_file() - create the restore-direction LM file.
 * @pds_vfio: per-VF device state
 *
 * Unlike the save path, the restore buffer has a fixed size: the full
 * pds_lm_dev_state union.  Allocates a write-only LM file of that size
 * and stashes it in pds_vfio->restore_file.  Size validation and return
 * statements are elided in this view.
 */
327 static int pds_vfio_get_restore_file(struct pds_vfio_pci_device *pds_vfio)
329 struct device *dev = &pds_vfio->vfio_coredev.pdev->dev;
330 struct pds_vfio_lm_file *lm_file;
333 size = sizeof(union pds_lm_dev_state);
334 dev_dbg(dev, "restore status, size = %lld\n", size);
337 dev_err(dev, "invalid state size");
341 lm_file = pds_vfio_get_lm_file(&pds_vfio_restore_fops, O_WRONLY, size);
343 dev_err(dev, "failed to create restore file");
346 pds_vfio->restore_file = lm_file;
/*
 * pds_vfio_step_device_state_locked() - perform one arc of the VFIO
 * migration state machine.
 * @pds_vfio: per-VF device state (caller holds the state lock, per the
 *            _locked suffix)
 * @next:     target state, exactly one FSM step away from the current one
 *
 * Each if-block handles a single (cur, next) arc defined by the VFIO
 * migration uAPI.  Arcs that produce a data-transfer file (STOP ->
 * STOP_COPY, STOP -> RESUMING) return the file pointer; others return
 * NULL on success (returns elided in this view).  Unrecognized arcs
 * return ERR_PTR(-EINVAL).
 */
352 pds_vfio_step_device_state_locked(struct pds_vfio_pci_device *pds_vfio,
353 enum vfio_device_mig_state next)
355 enum vfio_device_mig_state cur = pds_vfio->state;
/* STOP -> STOP_COPY: snapshot device state and expose the save file. */
358 if (cur == VFIO_DEVICE_STATE_STOP && next == VFIO_DEVICE_STATE_STOP_COPY) {
359 err = pds_vfio_get_save_file(pds_vfio);
363 err = pds_vfio_get_lm_state_cmd(pds_vfio);
/* Undo the file allocation if fetching the state failed. */
365 pds_vfio_put_save_file(pds_vfio);
369 return pds_vfio->save_file->filep;
/* STOP_COPY -> STOP: transfer done; drop save file, stop dirty tracking. */
372 if (cur == VFIO_DEVICE_STATE_STOP_COPY && next == VFIO_DEVICE_STATE_STOP) {
373 pds_vfio_put_save_file(pds_vfio);
374 pds_vfio_dirty_disable(pds_vfio, true);
/* STOP -> RESUMING: expose the restore file for incoming state. */
378 if (cur == VFIO_DEVICE_STATE_STOP && next == VFIO_DEVICE_STATE_RESUMING) {
379 err = pds_vfio_get_restore_file(pds_vfio);
383 return pds_vfio->restore_file->filep;
/* RESUMING -> STOP: push the received state to the device. */
386 if (cur == VFIO_DEVICE_STATE_RESUMING && next == VFIO_DEVICE_STATE_STOP) {
387 err = pds_vfio_set_lm_state_cmd(pds_vfio);
391 pds_vfio_put_restore_file(pds_vfio);
/* RUNNING -> RUNNING_P2P: flag LM in progress, quiesce P2P traffic. */
395 if (cur == VFIO_DEVICE_STATE_RUNNING && next == VFIO_DEVICE_STATE_RUNNING_P2P) {
396 pds_vfio_send_host_vf_lm_status_cmd(pds_vfio,
397 PDS_LM_STA_IN_PROGRESS);
398 err = pds_vfio_suspend_device_cmd(pds_vfio,
399 PDS_LM_SUSPEND_RESUME_TYPE_P2P);
/* RUNNING_P2P -> RUNNING: fully resume and clear the LM status flag. */
406 if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && next == VFIO_DEVICE_STATE_RUNNING) {
407 err = pds_vfio_resume_device_cmd(pds_vfio,
408 PDS_LM_SUSPEND_RESUME_TYPE_FULL);
412 pds_vfio_send_host_vf_lm_status_cmd(pds_vfio, PDS_LM_STA_NONE);
/* STOP -> RUNNING_P2P: resume only the P2P portion of the device. */
416 if (cur == VFIO_DEVICE_STATE_STOP && next == VFIO_DEVICE_STATE_RUNNING_P2P) {
417 err = pds_vfio_resume_device_cmd(pds_vfio,
418 PDS_LM_SUSPEND_RESUME_TYPE_P2P);
/* RUNNING_P2P -> STOP: fully suspend the device. */
425 if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && next == VFIO_DEVICE_STATE_STOP) {
426 err = pds_vfio_suspend_device_cmd(pds_vfio,
427 PDS_LM_SUSPEND_RESUME_TYPE_FULL);
/* Any arc not handled above is not a valid single FSM step. */
433 return ERR_PTR(-EINVAL);