/*
 * Copyright (C) 2018 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <exception.h>

extern void reschedule(void);

bool_t scheduler_enabled = FALSE;
static list_entry_t thread_queue[THREAD_PRIORITY_MAX];
static thread_t *current_thread = NULL;
static thread_t *last_fpu_thread = NULL;
static dword_t tid_alloc_bitmap[MAX_THREADS / 32];
static DECLARE_LOCK(tid_bitmap_lock);
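
/* Allocate the lowest free thread ID from the global bitmap.
 * Returns (dword_t)-1 when every TID is in use. */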
static dword_t alloc_tid()
{
    dword_t i;
    dword_t tid = (dword_t)-1;

    lock_acquire(&tid_bitmap_lock);

    for (i = 0; i < MAX_THREADS; i++)
    {
        if (!test_bit(tid_alloc_bitmap, i))
        {
            tid = i;
            set_bit(tid_alloc_bitmap, i);
            break;
        }
    }

    lock_release(&tid_bitmap_lock);

    return tid;
}
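
/* Evaluate a wait condition tree. Group nodes recurse into their children;
 * leaf nodes compare *pointer against value. */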
static inline bool_t test_condition(wait_condition_t *condition)
{
    wait_condition_t **ptr;

    switch (condition->type)
    {
    case WAIT_GROUP_ANY: /* case label elided in the source; name is an assumption */
        for (ptr = condition->conditions; *ptr; ptr++) if (test_condition(*ptr)) return TRUE;
        return FALSE;

    case WAIT_GROUP_ALL: /* case label elided in the source; name is an assumption */
        for (ptr = condition->conditions; *ptr; ptr++) if (!test_condition(*ptr)) return FALSE;
        return TRUE;

    case WAIT_ALWAYS: /* never satisfied; such a wait ends only by timeout or cancellation */
        return FALSE;

    case WAIT_UNTIL_EQUAL:
        return (*condition->pointer == condition->value);
    case WAIT_UNTIL_NOT_EQUAL:
        return (*condition->pointer != condition->value);
    case WAIT_UNTIL_LESS:
        return (*condition->pointer < condition->value);
    case WAIT_UNTIL_NOT_LESS:
        return (*condition->pointer >= condition->value);
    case WAIT_UNTIL_GREATER:
        return (*condition->pointer > condition->value);
    case WAIT_UNTIL_NOT_GREATER:
        return (*condition->pointer <= condition->value);

    default:
        KERNEL_CRASH("Invalid wait condition value");
        return FALSE;
    }
}
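
/* Decide whether the scheduler may run this thread right now. A completed
 * wait records its result here as a side effect. */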
static inline bool_t is_thread_ready(thread_t *thread)
{
    qword_t current_time = syscall_get_milliseconds();

    if (thread->terminated) return FALSE;
    if (thread->frozen > 0 && !thread->in_kernel) return FALSE;
    if (!thread->wait) return TRUE;

    if (test_condition(thread->wait->root))
    {
        thread->wait->result = WAIT_CONDITION_HIT;
        thread->wait = NULL; /* assumed: a finished wait is detached so the thread can wait again */
        return TRUE;
    }

    if (thread->wait->timeout != NO_TIMEOUT && (current_time - thread->wait->timestamp) >= (qword_t)thread->wait->timeout)
    {
        thread->wait->result = WAIT_TIMED_OUT;
        thread->wait = NULL;
        return TRUE;
    }

    if (thread->terminating)
    {
        thread->wait->result = WAIT_CANCELED;
        thread->wait = NULL;
        return TRUE;
    }

    return FALSE;
}
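
/* Unlink a dead thread from the scheduler and its process, free its kernel
 * stack, and drop the scheduler's reference. Destroys the owning process
 * when its thread list becomes empty. */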
static void destroy_thread(thread_t *thread)
{
    list_remove(&thread->in_queue_list);

    lock_acquire(&thread->owner_process->thread_list_lock);
    list_remove(&thread->in_process_list);
    lock_release(&thread->owner_process->thread_list_lock);

    free(thread->kernel_stack);
    thread->kernel_stack = NULL;

    if (thread->owner_process->threads.next == &thread->owner_process->threads)
    {
        destroy_process(thread->owner_process);
    }

    dereference(&thread->header);
}
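
/* Object manager callback: forget the cached FPU owner if it is the thread
 * being cleaned up. */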
void thread_cleanup(object_t *obj)
{
    if (CONTAINER_OF(obj, thread_t, header) == last_fpu_thread) last_fpu_thread = NULL;
}
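
/* Object manager callback: waiting on a thread object means waiting until
 * the thread has terminated. */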
dword_t thread_pre_wait(object_t *obj, void *parameter, wait_condition_t *condition)
{
    thread_t *thread = (thread_t*)obj;
    condition->type = WAIT_UNTIL_NOT_EQUAL;
    condition->pointer = &thread->terminated;
    condition->value = FALSE;
    return ERR_SUCCESS;
}
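
/* Common thread constructor: allocates the thread object and TID, seeds the
 * initial register state, and queues the thread at its priority level. On
 * failure the caller keeps ownership of kernel_stack. */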
dword_t create_thread_internal(process_t *proc, thread_state_t *initial_state, dword_t flags, priority_t priority, void *kernel_stack, thread_t **new_thread)
{
    dword_t ret;
    critical_t critical;

    if (proc->terminating) return ERR_CANCELED;

    thread_t *thread = (thread_t*)malloc(sizeof(thread_t));
    if (thread == NULL) return ERR_NOMEMORY;

    init_object(&thread->header, NULL, OBJECT_THREAD);

    ret = create_object(&thread->header);
    if (ret != ERR_SUCCESS)
    {
        free(thread);
        return ret;
    }

    thread->kernel_stack = NULL; /* set for real below, once nothing else can fail; keeps the cleanup path's free() well-defined */

    thread->tid = alloc_tid();
    if (thread->tid == (dword_t)-1)
    {
        ret = ERR_NOMEMORY; /* assumed error code; the original line is elided */
        goto cleanup;
    }

    thread->priority = priority;
    thread->quantum = QUANTUM;
    thread->frozen = (flags & THREAD_CREATE_FROZEN) ? TRUE : FALSE;
    thread->running_ticks = 0ULL;
    thread->owner_process = proc;
    thread->exit_code = 0;
    thread->terminating = FALSE;
    thread->terminated = FALSE;
    thread->last_context = NULL;
    thread->wait = NULL; /* assumed initialization: the scheduler dereferences thread->wait */

    memset(&thread->kernel_handler, 0, sizeof(thread->kernel_handler));
    memset(&thread->user_handler, 0, sizeof(thread->user_handler));

    thread->state = *initial_state;
    thread->state.regs.eflags = 0x202; /* IF set, reserved bit 1 */

    if (proc != kernel_process)
    {
        thread->previous_mode = USER_MODE;
        thread->in_kernel = 0;
        thread->state.regs.cs = get_user_code_selector();
        thread->state.regs.data_selector = get_user_data_selector();
    }
    else
    {
        thread->previous_mode = KERNEL_MODE;
        thread->in_kernel = 1; /* assumed counterpart of the user-mode branch; the original line is elided */
        thread->state.regs.cs = get_kernel_code_selector();
        thread->state.regs.data_selector = get_kernel_data_selector();
    }

    thread->kernel_stack = kernel_stack;
    thread->kernel_esp = ((uintptr_t)thread->kernel_stack + KERNEL_STACK_SIZE + 3) & ~3; /* round up to 4 bytes; the stack is over-allocated to make this safe */

    lock_acquire(&thread->owner_process->thread_list_lock);
    list_append(&proc->threads, &thread->in_process_list);
    lock_release(&thread->owner_process->thread_list_lock);

    enter_critical(&critical);
    list_append(&thread_queue[priority], &thread->in_queue_list);
    leave_critical(&critical);

    *new_thread = thread;

cleanup:
    if (ret != ERR_SUCCESS)
    {
        if (thread->kernel_stack) free(thread->kernel_stack);

        /* release the TID before the object itself, which dereference() may free */
        if (thread->tid != (dword_t)-1)
        {
            lock_acquire(&tid_bitmap_lock);
            clear_bit(tid_alloc_bitmap, thread->tid);
            lock_release(&tid_bitmap_lock);
        }

        dereference(&thread->header);
    }

    return ret;
}

thread_t *get_current_thread()
{
    return current_thread;
}
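
/* Lazy FPU switching: save the FPU context of the previous owner, restore
 * ours, and clear CR0.TS so FPU instructions stop faulting. */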
void thread_lazy_fpu(void)
{
    if (last_fpu_thread) cpu_save_fpu_state(last_fpu_thread->state.fpu_state);
    cpu_restore_fpu_state(current_thread->state.fpu_state);
    last_fpu_thread = current_thread;
    asm volatile ("clts"); /* clear CR0.TS: the FPU now belongs to this thread */
}
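
/* Round-robin priority scheduler, run on every timer tick. When the current
 * thread's quantum expires, the highest-priority ready thread is switched in
 * by rewriting the interrupt frame on its kernel stack. */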
void scheduler(registers_t *regs)
{
    dword_t i;
    critical_t critical;

    enter_critical(&critical);

    if (current_thread->quantum == 0)
    {
        /* put the outgoing thread back on its priority queue */
        list_append(&thread_queue[current_thread->priority], &current_thread->in_queue_list);

        thread_t *next_thread = NULL;

        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;

            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *thread = CONTAINER_OF(ptr, thread_t, in_queue_list);

                if (is_thread_ready(thread))
                {
                    next_thread = thread;
                    break;
                }
            }

            if (next_thread) break;
        }

        ASSERT(next_thread != NULL);
        list_remove(&next_thread->in_queue_list);

        if (current_thread->tid != 0) ASSERT(current_thread->kernel_esp >= (uintptr_t)current_thread->kernel_stack);
        if (next_thread->tid != 0) ASSERT(next_thread->kernel_esp >= (uintptr_t)next_thread->kernel_stack);

        if (current_thread != next_thread)
        {
            /* save the outgoing context; user threads also save their ring-3 stack pointer */
            memcpy(&current_thread->state.regs, regs, sizeof(registers_t));
            current_thread->kernel_esp = regs->esp;
            if (SEGMENT_RPL(regs->cs) != 0) current_thread->state.regs.esp = ((registers_ext_t*)regs)->esp3;

            set_kernel_esp(next_thread->kernel_esp);

            /* asm volatile ("pushl %eax\n" ...); -- disabled alternative, body elided in the source */

            /* rebuild an IRET frame plus saved registers on the incoming thread's kernel stack */
            if (SEGMENT_RPL(next_thread->state.regs.cs) != 0)
            {
                push_to_stack(&next_thread->kernel_esp, get_user_data_selector());
                push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            }

            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eflags);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.cs);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eip);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.error_code);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eax);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ecx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.data_selector);

            regs->esp = next_thread->kernel_esp;
            regs->error_code = CONTEXT_SWITCH_MAGIC;

            if (current_thread->owner_process != next_thread->owner_process)
            {
                set_page_directory(next_thread->owner_process->memory_space.page_directory);
            }
        }

        if (current_thread->owner_process != kernel_process)
        {
            bump_address_space(&current_thread->owner_process->memory_space);
        }

        if (current_thread->terminating && !current_thread->in_kernel) current_thread->terminated = TRUE;
        if (current_thread->terminated) destroy_thread(current_thread);
        current_thread = next_thread;
        current_thread->quantum = QUANTUM;
    }
    else
    {
        current_thread->quantum--;
    }

    leave_critical(&critical);
}
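
/* Block the calling thread on a wait condition until it is satisfied, the
 * timeout (in milliseconds) expires, or the thread is being terminated. */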
wait_result_t scheduler_wait(wait_condition_t *condition, dword_t timeout)
{
    if (test_condition(condition)) return WAIT_CONDITION_HIT;
    if (timeout == 0) return WAIT_TIMED_OUT;

    wait_t wait = { .root = condition, .timeout = timeout, .timestamp = syscall_get_milliseconds(), .result = WAIT_CANCELED };
    while (!__sync_bool_compare_and_swap(&current_thread->wait, NULL, &wait)) continue;
    syscall_yield_quantum();

    /* the scheduler has detached the wait by the time we run again */
    return wait.result;
}

sysret_t syscall_sleep(qword_t milliseconds)
{
    wait_condition_t condition = { .type = WAIT_ALWAYS };
    return scheduler_wait(&condition, milliseconds) == WAIT_CANCELED ? ERR_CANCELED : ERR_SUCCESS;
}

sysret_t syscall_yield_quantum()
{
    current_thread->quantum = 0;
    reschedule(); /* assumed: the elided body invokes the reschedule() declared above */
    return ERR_SUCCESS;
}
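
/* Spawn a kernel-mode thread running `routine`. A zero stack_size selects
 * KERNEL_STACK_SIZE; the parameter is pushed onto the new thread's stack. */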
dword_t create_system_thread(thread_procedure_t routine, dword_t flags, priority_t priority, dword_t stack_size, void *param, thread_t **new_thread)
{
    thread_state_t initial_state;
    memset(&initial_state, 0, sizeof(initial_state));

    if (!stack_size) stack_size = KERNEL_STACK_SIZE;

    /* over-allocate so rounding the stack top up to 4 bytes stays in bounds */
    void *kernel_stack = malloc(stack_size + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL) return ERR_NOMEMORY;

    dword_t ret = commit_pages(kernel_stack, stack_size);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        return ret;
    }

    initial_state.regs.eip = (dword_t)routine;
    initial_state.regs.esp = ((dword_t)kernel_stack + stack_size + 3) & ~3;

    push_to_stack((uintptr_t*)&initial_state.regs.esp, (uintptr_t)param);

    ret = create_thread_internal(kernel_process, &initial_state, flags, priority, kernel_stack, new_thread);
    if (ret != ERR_SUCCESS) free(kernel_stack);
    return ret;
}
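
/* Syscall: create a thread inside a target process from a caller-supplied
 * initial CPU state. User callers may not inject threads into the kernel
 * process. */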
sysret_t syscall_create_thread(handle_t process, thread_state_t *initial_state, dword_t flags, priority_t priority, handle_t *new_thread)
{
    dword_t ret;
    thread_state_t safe_state;
    process_t *proc;
    thread_t *thread;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(initial_state, sizeof(*initial_state))) return ERR_BADPTR;
        if (!check_usermode(new_thread, sizeof(*new_thread))) return ERR_BADPTR;

        EH_TRY safe_state = *initial_state;
        EH_CATCH EH_ESCAPE(return ERR_BADPTR);
    }
    else
    {
        safe_state = *initial_state;
    }

    if (process != INVALID_HANDLE)
    {
        if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
    }
    else
    {
        proc = get_current_process();
        reference(&proc->header);
    }

    if (get_previous_mode() == USER_MODE && proc == kernel_process)
    {
        ret = ERR_FORBIDDEN;
        goto cleanup;
    }

    void *kernel_stack = malloc(KERNEL_STACK_SIZE + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    ret = commit_pages(kernel_stack, KERNEL_STACK_SIZE);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        goto cleanup;
    }

    ret = create_thread_internal(proc, &safe_state, flags, priority, kernel_stack, &thread);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        goto cleanup;
    }

    handle_t thread_handle;
    ret = open_object(&thread->header, 0, &thread_handle);

    EH_TRY *new_thread = thread_handle;
    EH_CATCH syscall_close_object(thread_handle);

cleanup:
    dereference(&proc->header);
    return ret;
}
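
/* Syscall: look up a live thread by TID and return a handle to it. Only the
 * current thread and threads sitting in the priority queues can be found. */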
sysret_t syscall_open_thread(dword_t tid, handle_t *handle)
{
    dword_t i;
    critical_t critical;
    thread_t *thread = NULL;
    dword_t ret = ERR_NOTFOUND;

    enter_critical(&critical);

    if (current_thread->tid == tid)
    {
        thread = current_thread;
    }
    else
    {
        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;

            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *entry = CONTAINER_OF(ptr, thread_t, in_queue_list);

                if (entry->tid == tid)
                {
                    thread = entry;
                    break;
                }
            }

            if (thread) break;
        }
    }

    if (thread != NULL) ret = open_object(&thread->header, 0, handle);
    else ret = ERR_NOTFOUND;

    leave_critical(&critical);
    return ret;
}
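
/* Mark a thread for termination; the scheduler completes the kill once the
 * thread is outside the kernel. */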
dword_t terminate_thread_internal(thread_t *thread, dword_t exit_code)
{
    thread->exit_code = exit_code;
    thread->terminating = TRUE;
    return ERR_SUCCESS;
}

sysret_t syscall_terminate_thread(handle_t handle, dword_t exit_code)
{
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    dword_t ret = terminate_thread_internal(thread, exit_code);
    dereference(&thread->header);
    return ret;
}
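
/* Syscall: copy one piece of information about a thread into a caller
 * buffer, staging through kernel memory for user-mode callers. */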
sysret_t syscall_query_thread(handle_t handle, thread_info_t info_type, void *buffer, size_t size)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;
    void *safe_buffer;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(buffer, size)) return ERR_BADPTR;

        safe_buffer = malloc(size);
        if (safe_buffer == NULL) return ERR_NOMEMORY;
        memset(safe_buffer, 0, size);
    }
    else
    {
        safe_buffer = buffer;
    }

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
        {
            if (get_previous_mode() == USER_MODE) free(safe_buffer);
            return ERR_INVALID;
        }
    }

    switch (info_type)
    {
    case THREAD_TID_INFO:
        if (size >= sizeof(dword_t)) *((dword_t*)safe_buffer) = thread->tid;
        else ret = ERR_SMALLBUF;
        break;

    case THREAD_FROZEN_INFO:
        if (size >= sizeof(int32_t)) *((int32_t*)safe_buffer) = thread->frozen;
        else ret = ERR_SMALLBUF;
        break;

    case THREAD_CPU_STATE_INFO:
        if (size >= sizeof(thread_state_t))
        {
            if (current_thread->tid != thread->tid)
            {
                /* another thread: its saved context is current */
                *((thread_state_t*)safe_buffer) = thread->state;
            }
            else
            {
                /* ourselves: use the context captured at kernel entry and the live FPU */
                ((thread_state_t*)safe_buffer)->regs = *thread->last_context;
                cpu_save_fpu_state(((thread_state_t*)safe_buffer)->fpu_state);
            }
        }
        else
        {
            ret = ERR_SMALLBUF;
        }

        break;

    case THREAD_PRIORITY_INFO:
        if (size >= sizeof(priority_t)) *((priority_t*)safe_buffer) = thread->priority;
        else ret = ERR_SMALLBUF;
        break;

    case THREAD_AFFINITY_INFO:
        if (size >= sizeof(affinity_t)) *((affinity_t*)safe_buffer) = thread->affinity;
        else ret = ERR_SMALLBUF;
        break;

    default:
        ret = ERR_INVALID; /* assumption: unknown info types are rejected */
    }

    if (get_previous_mode() == USER_MODE)
    {
        EH_TRY memcpy(buffer, safe_buffer, size);
        EH_CATCH ret = ERR_BADPTR;

        free(safe_buffer);
    }

    dereference(&thread->header);
    return ret;
}
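
/* Syscall: update a thread's CPU state, priority, or affinity from a caller
 * buffer, staging through kernel memory for user-mode callers. */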
sysret_t syscall_set_thread(handle_t handle, thread_info_t info_type, const void *buffer, size_t size)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;
    void *safe_buffer;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(buffer, size)) return ERR_BADPTR;

        safe_buffer = malloc(size);
        if (safe_buffer == NULL) return ERR_NOMEMORY;

        EH_TRY memcpy(safe_buffer, buffer, size);
        EH_CATCH ret = ERR_BADPTR;
    }
    else
    {
        safe_buffer = (void*)buffer;
    }

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
        {
            if (get_previous_mode() == USER_MODE) free(safe_buffer);
            return ERR_INVALID;
        }
    }

    switch (info_type)
    {
    case THREAD_CPU_STATE_INFO:
        if (size >= sizeof(thread_state_t))
        {
            if (thread->owner_process->pid == kernel_process->pid)
            {
                ret = ERR_FORBIDDEN;
                break;
            }

            thread_state_t *new_state = (thread_state_t*)safe_buffer;
            critical_t critical;

            if (current_thread->tid != thread->tid) enter_critical(&critical);

            if (thread->in_kernel == 0)
            {
                thread->state.regs.eax = new_state->regs.eax;
                thread->state.regs.ecx = new_state->regs.ecx;
                thread->state.regs.edx = new_state->regs.edx;
                thread->state.regs.ebx = new_state->regs.ebx;
                thread->state.regs.esp = new_state->regs.esp;
                thread->state.regs.ebp = new_state->regs.ebp;
                thread->state.regs.esi = new_state->regs.esi;
                thread->state.regs.edi = new_state->regs.edi;
                thread->state.regs.eip = new_state->regs.eip;
                thread->state.regs.eflags = (thread->state.regs.eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
            }
            else if (thread->last_context)
            {
                thread->last_context->eax = new_state->regs.eax;
                thread->last_context->ecx = new_state->regs.ecx;
                thread->last_context->edx = new_state->regs.edx;
                thread->last_context->ebx = new_state->regs.ebx;
                thread->last_context->esp = new_state->regs.esp;
                thread->last_context->ebp = new_state->regs.ebp;
                thread->last_context->esi = new_state->regs.esi;
                thread->last_context->edi = new_state->regs.edi;
                thread->last_context->eip = new_state->regs.eip;
                thread->last_context->eflags = (thread->last_context->eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
            }

            if (current_thread->tid != thread->tid)
            {
                memcpy(thread->state.fpu_state, new_state->fpu_state, sizeof(thread->state.fpu_state));
            }
            else
            {
                cpu_restore_fpu_state(new_state->fpu_state);
            }

            if (current_thread->tid != thread->tid) leave_critical(&critical);
        }
        else
        {
            ret = ERR_SMALLBUF;
        }

        break;

    case THREAD_PRIORITY_INFO:
        if (size >= sizeof(priority_t)) thread->priority = *((priority_t*)safe_buffer);
        else ret = ERR_SMALLBUF;
        break;

    case THREAD_AFFINITY_INFO:
        if (size >= sizeof(affinity_t)) thread->affinity = *((affinity_t*)safe_buffer);
        else ret = ERR_SMALLBUF;
        break;

    default:
        ret = ERR_INVALID; /* assumption: unknown info types are rejected */
    }

    if (get_previous_mode() == USER_MODE) free(safe_buffer);
    dereference(&thread->header);
    return ret;
}
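
/* Syscalls: freeze/thaw suspend and resume a thread. A frozen thread keeps
 * running only while it is inside the kernel (see is_thread_ready). */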
sysret_t syscall_freeze_thread(handle_t handle)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    thread->frozen++; /* assumed from the elided body: frozen acts as a counter (is_thread_ready tests frozen > 0) */

    dereference(&thread->header);
    return ret;
}

sysret_t syscall_thaw_thread(handle_t handle)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    if (thread->frozen > 0) thread->frozen--; /* assumed from the elided body: undo one freeze */

    dereference(&thread->header);
    return ret;
}
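
/* Bootstrap: wrap the already-running boot CPU context in a thread object
 * (TID 0), initialize the priority queues, and enable the scheduler. */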
void thread_init(void)
{
    dword_t i;
    critical_t critical;

    memset(tid_alloc_bitmap, 0, sizeof(tid_alloc_bitmap));
    set_bit(tid_alloc_bitmap, 0); /* TID 0 is reserved for the boot thread */

    thread_t *main_thread = (thread_t*)malloc(sizeof(thread_t));
    if (main_thread == NULL) KERNEL_CRASH("Cannot allocate thread object");

    init_object(&main_thread->header, NULL, OBJECT_THREAD);

    if (create_object(&main_thread->header) != ERR_SUCCESS)
    {
        KERNEL_CRASH("Cannot initialize thread object");
    }

    main_thread->tid = 0;
    main_thread->priority = THREAD_PRIORITY_MID;
    main_thread->kernel_stack = malloc(KERNEL_STACK_SIZE);
    ASSERT(main_thread->kernel_stack != NULL);
    commit_pages(main_thread->kernel_stack, KERNEL_STACK_SIZE);
    main_thread->kernel_esp = ((uintptr_t)main_thread->kernel_stack + KERNEL_STACK_SIZE) & ~3;
    set_kernel_esp(main_thread->kernel_esp);
    main_thread->exit_code = 0;
    main_thread->quantum = 0;
    main_thread->running_ticks = 0ULL;
    main_thread->owner_process = kernel_process;
    list_append(&kernel_process->threads, &main_thread->in_process_list);
    main_thread->in_kernel = 1;
    main_thread->last_context = NULL;
    main_thread->terminated = FALSE;
    main_thread->terminating = FALSE; /* not in the visible source; set so the scheduler's checks start deterministic */
    main_thread->frozen = 0;          /* likewise */
    main_thread->previous_mode = KERNEL_MODE;
    main_thread->wait = NULL;

    memset(&main_thread->kernel_handler, 0, sizeof(main_thread->kernel_handler));
    memset(&main_thread->user_handler, 0, sizeof(main_thread->user_handler));

    enter_critical(&critical);

    current_thread = main_thread;
    for (i = 0; i < THREAD_PRIORITY_MAX; i++) list_init(&thread_queue[i]);
    scheduler_enabled = TRUE;

    leave_critical(&critical);
}