// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */
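/*
 * Lock ordering, outermost first, as taken by the code below:
 * read_ldt()/write_ldt() take context.ldt_usr_sem, and install_ldt()
 * (reached with ldt_usr_sem held for write) then takes context.lock.
 */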
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}
/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}
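/*
 * Size math for the allocator above: LDT_ENTRIES is 8192 and
 * LDT_ENTRY_SIZE is 8 bytes, so a full LDT is 64 KiB and takes the
 * vzalloc() path, while small LDTs round up to one zeroed page.
 */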
/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	bool is_vmalloc, had_top_level_entry;
	unsigned long va;
	spinlock_t *ptl;
	int i, nr_pages;
	pgd_t *pgd;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/*
	 * Did we already have the top level entry allocated?  We can't
	 * use pgd_none() for this because it doesn't do anything on
	 * 4-level page table kernels.
	 */
	pgd = pgd_offset(mm, LDT_BASE_ADDR);
	had_top_level_entry = (pgd->pgd != 0);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));

		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;

		/*
		 * Map it RO so the easy to find address is not a primary
		 * target via some kernel interface which misses a
		 * permission check.
		 */
		pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL));
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}
	if (mm->context.ldt) {
		/*
		 * We already had an LDT.  The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_top_level_entry);
		if (static_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this
		 * process.  Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_top_level_entry);
		if (static_cpu_has(X86_FEATURE_PTI)) {
			WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
			set_pgd(kernel_to_user_pgdp(pgd), *pgd);
		}
	}

	va = (unsigned long)ldt_slot_va(slot);
	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, 0);

	ldt->slot = slot;
#endif
	return 0;
}
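/*
 * The PTI LDT area provides two alias slots per mm.  write_ldt() maps a
 * new LDT into the slot the old one does not occupy
 * (old_ldt ? !old_ldt->slot : 0), so CPUs still running on the old LDT
 * keep a valid mapping until install_ldt() switches them over.
 */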
static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	unsigned long va;
	int i, nr_pages;

	if (!ldt)
		return;

	/* LDT map/unmap is only required for PTI */
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		spinlock_t *ptl;
		pte_t *ptep;

		va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
		ptep = get_locked_pte(mm, va, &ptl);
		pte_clear(mm, va, ptep);
		pte_unmap_unlock(ptep, ptl);
	}

	va = (unsigned long)ldt_slot_va(ldt->slot);
	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, 0);
#endif /* CONFIG_PAGE_TABLE_ISOLATION */
}
static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = start + (1UL << PGDIR_SHIFT);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}
/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}
static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with READ_ONCE in load_mm_ldt. */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}
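/*
 * on_each_cpu_mask() above runs flush_ldt() via IPI on every CPU in
 * mm_cpumask(mm); each CPU that is currently running this mm reloads
 * its LDT register and refreshes any cached DS/ES selectors that point
 * into the LDT.
 */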
static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}
/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state,
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}
/*
 * No need to lock the MM as we are the last user.
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	/* arch_prctl */
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}
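/*
 * Note that read_default_ldt() never copies real descriptors: the
 * default LDT is empty, so the caller just gets bytecount zero bytes,
 * capped at the per-arch size above.
 */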
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;

	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}
	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt        = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);
	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * This only can fail for the first LDT setup. If an LDT is
		 * already installed then the PTE page is already
		 * populated. Mop up a half populated page table.
		 */
		if (!WARN_ON_ONCE(old_ldt))
			free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	unmap_ldt_struct(mm, old_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'.  This cast gives us an int-sized value in %rax
	 * for the return code.  The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
	return (unsigned int)ret;
}
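/*
 * Illustrative userspace sketch (not part of this file) of the func
 * codes dispatched above: 0 reads the LDT, 1 and 0x11 write an entry
 * (old and new mode respectively), 2 reads the default LDT.
 *
 *	struct user_desc d = {
 *		.entry_number	= 0,
 *		.base_addr	= 0,
 *		.limit		= 0xfffff,
 *		.seg_32bit	= 1,
 *		.limit_in_pages	= 1,
 *	};
 *	if (syscall(SYS_modify_ldt, 0x11, &d, sizeof(d)) != 0)
 *		err(1, "modify_ldt");
 */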