GNU Linux-libre 5.15.72-gnu
arch/powerpc/platforms/pseries/svm.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Secure VM platform
 *
 * Copyright 2018 IBM Corporation
 * Author: Anshuman Khandual <khandual@linux.vnet.ibm.com>
 */

#include <linux/mm.h>
#include <linux/memblock.h>
#include <asm/machdep.h>
#include <asm/svm.h>
#include <asm/swiotlb.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>

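/*
 * Set up DMA for a secure guest: keep the SWIOTLB bounce buffer, force all
 * DMA through it, and share that buffer with the hypervisor.
 */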
static int __init init_svm(void)
{
        if (!is_secure_guest())
                return 0;

        /* Don't release the SWIOTLB buffer. */
        ppc_swiotlb_enable = 1;

        /*
         * Since the guest memory is inaccessible to the host, devices always
         * need to use the SWIOTLB buffer for DMA even if dma_capable() says
         * otherwise.
         */
        swiotlb_force = SWIOTLB_FORCE;

        /* Share the SWIOTLB buffer with the host. */
        swiotlb_update_mem_attributes();

        return 0;
}
machine_early_initcall(pseries, init_svm);

/*
 * Initialize SWIOTLB. Essentially the same as swiotlb_init(), except that it
 * can allocate the buffer anywhere in memory. Since the hypervisor doesn't have
 * any addressing limitation, we don't need to allocate it in low addresses.
 */
void __init svm_swiotlb_init(void)
{
        unsigned char *vstart;
        unsigned long bytes, io_tlb_nslabs;

        io_tlb_nslabs = (swiotlb_size_or_default() >> IO_TLB_SHIFT);
        io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);

        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
        if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false))
                return;

        memblock_free_early(__pa(vstart),
                            PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
        panic("SVM: Cannot allocate SWIOTLB buffer");
}

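/*
 * For secure guests, "encrypted" means accessible only to the guest and
 * "decrypted" means shared with the hypervisor: pages are unshared or shared
 * through ultravisor calls rather than re-encrypted in place.
 */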
int set_memory_encrypted(unsigned long addr, int numpages)
{
        if (!mem_encrypt_active())
                return 0;

        if (!PAGE_ALIGNED(addr))
                return -EINVAL;

        uv_unshare_page(PHYS_PFN(__pa(addr)), numpages);

        return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
        if (!mem_encrypt_active())
                return 0;

        if (!PAGE_ALIGNED(addr))
                return -EINVAL;

        uv_share_page(PHYS_PFN(__pa(addr)), numpages);

        return 0;
}

/* There's one dispatch log per CPU. */
#define NR_DTL_PAGE (DISPATCH_LOG_BYTES * CONFIG_NR_CPUS / PAGE_SIZE)

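/*
 * Pages that have already been shared with the hypervisor, so that each
 * dispatch trace log page is shared at most once.
 */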
static struct page *dtl_page_store[NR_DTL_PAGE];
static long dtl_nr_pages;

static bool is_dtl_page_shared(struct page *page)
{
        long i;

        for (i = 0; i < dtl_nr_pages; i++)
                if (dtl_page_store[i] == page)
                        return true;

        return false;
}

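/*
 * Constructor for the dispatch trace log (DTL) slab cache: the first time a
 * backing page is seen, record it and share it with the hypervisor so the
 * hypervisor can write dispatch entries into it.
 */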
void dtl_cache_ctor(void *addr)
{
        unsigned long pfn = PHYS_PFN(__pa(addr));
        struct page *page = pfn_to_page(pfn);

        if (!is_dtl_page_shared(page)) {
                dtl_page_store[dtl_nr_pages] = page;
                dtl_nr_pages++;
                WARN_ON(dtl_nr_pages >= NR_DTL_PAGE);
                uv_share_page(pfn, 1);
        }
}