Improve context switching and scheduler APIs.
kernel/src/thread.c
/*
 * thread.c
 *
 * Copyright (C) 2018 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <thread.h>
#include <timer.h>
#include <process.h>
#include <exception.h>
#include <syscalls.h>
#include <segments.h>
#include <heap.h>
#include <cpu.h>

extern void reschedule(void);

bool_t scheduler_enabled = FALSE;
static list_entry_t thread_queue[THREAD_PRIORITY_MAX];
static thread_t *current_thread = NULL;
static thread_t *last_fpu_thread = NULL;
static dword_t tid_alloc_bitmap[MAX_THREADS / 32];
static lock_t tid_bitmap_lock = 0;

static dword_t alloc_tid()
{
    int i;
    dword_t tid = (dword_t)-1;

    acquire_lock(&tid_bitmap_lock);

    for (i = 0; i < MAX_THREADS; i++)
    {
        if (!test_bit(tid_alloc_bitmap, i))
        {
            tid = i;
            set_bit(tid_alloc_bitmap, i);
            break;
        }
    }

    release_lock(&tid_bitmap_lock);
    return tid;
}
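
/*
 * Wait conditions form a small tree: leaf nodes compare *pointer against
 * value, and WAIT_GROUP_ANY / WAIT_GROUP_ALL combine a NULL-terminated
 * array of sub-conditions. A minimal sketch (the flag names here are
 * hypothetical):
 *
 *     dword_t rx_ready, shutdown;
 *     wait_condition_t rx   = { .type = WAIT_UNTIL_NOT_EQUAL, .pointer = &rx_ready, .value = 0 };
 *     wait_condition_t stop = { .type = WAIT_UNTIL_NOT_EQUAL, .pointer = &shutdown, .value = 0 };
 *     wait_condition_t *children[] = { &rx, &stop, NULL };
 *     wait_condition_t either = { .type = WAIT_GROUP_ANY, .conditions = children };
 *
 * WAIT_ALWAYS never becomes true, so such a wait can only end by timeout or
 * cancellation (this is how syscall_sleep() is built).
 */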

static inline bool_t test_condition(wait_condition_t *condition)
{
    wait_condition_t **ptr;

    switch (condition->type)
    {
    case WAIT_GROUP_ANY:
        for (ptr = condition->conditions; *ptr; ptr++) if (test_condition(*ptr)) return TRUE;
        return FALSE;
    case WAIT_GROUP_ALL:
        for (ptr = condition->conditions; *ptr; ptr++) if (!test_condition(*ptr)) return FALSE;
        return TRUE;
    case WAIT_ALWAYS:
        return FALSE;
    case WAIT_UNTIL_EQUAL:
        return *condition->pointer == condition->value;
    case WAIT_UNTIL_NOT_EQUAL:
        return *condition->pointer != condition->value;
    case WAIT_UNTIL_LESS:
        return *condition->pointer < condition->value;
    case WAIT_UNTIL_NOT_LESS:
        return *condition->pointer >= condition->value;
    case WAIT_UNTIL_GREATER:
        return *condition->pointer > condition->value;
    case WAIT_UNTIL_NOT_GREATER:
        return *condition->pointer <= condition->value;
    default:
        KERNEL_CRASH("Invalid wait condition type");
        return FALSE;
    }
}

static inline bool_t is_thread_ready(thread_t *thread)
{
    qword_t current_time = syscall_get_milliseconds();

    if (thread->terminating || thread->terminated) return FALSE;
    if (thread->frozen > 0 && !thread->in_kernel) return FALSE;
    if (!thread->wait) return TRUE;

    if (test_condition(thread->wait->root))
    {
        thread->wait->result = WAIT_CONDITION_HIT;
        thread->wait = NULL;
        return TRUE;
    }

    if (thread->wait->timeout != NO_TIMEOUT && (current_time - thread->wait->timestamp) >= (qword_t)thread->wait->timeout)
    {
        thread->wait->result = WAIT_TIMED_OUT;
        thread->wait = NULL;
        return TRUE;
    }

    return FALSE;
}

static void destroy_thread(thread_t *thread)
{
    list_remove(&thread->in_queue_list);

    acquire_resource_exclusive(&thread->owner_process->thread_list_res);
    list_remove(&thread->in_process_list);
    release_resource(&thread->owner_process->thread_list_res);

    free(thread->kernel_stack);
    thread->kernel_stack = NULL;

    if (thread->owner_process->threads.next == &thread->owner_process->threads)
    {
        destroy_process(thread->owner_process);
    }

    dereference(&thread->header);
}

void thread_cleanup(object_t *obj)
{
    if (CONTAINER_OF(obj, thread_t, header) == last_fpu_thread) last_fpu_thread = NULL;
}

dword_t create_thread_internal(process_t *proc, thread_state_t *initial_state, dword_t flags, priority_t priority, void *kernel_stack, thread_t **new_thread)
{
    dword_t ret;
    if (proc->terminating) return ERR_CANCELED;

    thread_t *thread = (thread_t*)malloc(sizeof(thread_t));
    if (thread == NULL) return ERR_NOMEMORY;

    init_object(&thread->header, NULL, OBJECT_THREAD);

    ret = create_object(&thread->header);
    if (ret != ERR_SUCCESS)
    {
        free(thread);
        return ret;
    }

    thread->tid = alloc_tid();
    if (thread->tid == (dword_t)-1)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    thread->priority = priority;
    thread->quantum = QUANTUM;
    thread->frozen = (flags & THREAD_CREATE_FROZEN) ? TRUE : FALSE;
    thread->running_ticks = 0ULL;
    thread->owner_process = proc;
    thread->exit_code = 0;
    thread->terminating = FALSE;
    thread->terminated = FALSE;
    thread->in_kernel = 0;
    thread->last_context = NULL;
    thread->wait = NULL;
    memset(&thread->kernel_handler, 0, sizeof(thread->kernel_handler));
    memset(&thread->user_handler, 0, sizeof(thread->user_handler));

    thread->state = *initial_state;
    thread->state.regs.eflags = 0x202;

    if (proc != kernel_process)
    {
        thread->previous_mode = USER_MODE;

        thread->state.regs.cs = get_user_code_selector();
        thread->state.regs.data_selector = get_user_data_selector();
    }
    else
    {
        thread->previous_mode = KERNEL_MODE;

        thread->state.regs.cs = get_kernel_code_selector();
        thread->state.regs.data_selector = get_kernel_data_selector();
    }

    thread->kernel_stack = kernel_stack;
    thread->kernel_esp = ((uintptr_t)thread->kernel_stack + KERNEL_STACK_SIZE + 3) & ~3;

    acquire_resource_exclusive(&thread->owner_process->thread_list_res);
    list_append(&proc->threads, &thread->in_process_list);
    release_resource(&thread->owner_process->thread_list_res);

    critical_t critical;
    enter_critical(&critical);
    list_append(&thread_queue[priority], &thread->in_queue_list);
    leave_critical(&critical);

    *new_thread = thread;
    ret = ERR_SUCCESS;

cleanup:
    if (ret != ERR_SUCCESS)
    {
        if (thread->tid != (dword_t)-1)
        {
            acquire_lock(&tid_bitmap_lock);
            clear_bit(tid_alloc_bitmap, thread->tid);
            release_lock(&tid_bitmap_lock);
        }

        /* the caller still owns the kernel stack on failure; release only our reference */
        dereference(&thread->header);
    }

    return ret;
}

thread_t *get_current_thread()
{
    return current_thread;
}
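
/*
 * Lazy FPU context switching: the scheduler sets CR0.TS on every switch, so
 * the first FPU/SSE instruction the new thread executes raises #NM. The #NM
 * handler is expected to call thread_lazy_fpu(), which saves the previous
 * owner's FPU state, restores the current thread's, and clears TS with clts.
 * Threads that never touch the FPU never pay for an FPU save/restore.
 */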

void thread_lazy_fpu(void)
{
    if (last_fpu_thread) fpu_save(last_fpu_thread->state.fpu_state);
    fpu_restore(current_thread->state.fpu_state);
    last_fpu_thread = current_thread;
    asm volatile ("clts");
}
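
/*
 * scheduler() runs with the register frame captured by the interrupt stub.
 * When the current thread's quantum is exhausted, it is appended back to its
 * priority queue and the queues are scanned from highest to lowest priority
 * for the first ready thread. Switching consists of replaying the incoming
 * thread's saved registers onto its kernel stack in the exact layout the
 * interrupt epilogue pops (data_selector, edi..eax, error_code, eip, cs,
 * eflags, plus user esp/ss when returning to ring 3), pointing regs->esp at
 * that frame, and tagging it with CONTEXT_SWITCH_MAGIC.
 */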

void scheduler(registers_t *regs)
{
    int i;
    critical_t critical;
    enter_critical(&critical);

    if (current_thread->quantum == 0)
    {
        list_append(&thread_queue[current_thread->priority], &current_thread->in_queue_list);
        thread_t *next_thread = NULL;

        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;

            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *thread = CONTAINER_OF(ptr, thread_t, in_queue_list);

                if (is_thread_ready(thread))
                {
                    next_thread = thread;
                    goto found;
                }
            }
        }

found:
        ASSERT(next_thread != NULL);
        list_remove(&next_thread->in_queue_list);

        if (current_thread != next_thread)
        {
            memcpy(&current_thread->state.regs, regs, sizeof(registers_t));

            current_thread->kernel_esp = regs->esp;
            current_thread->state.regs.esp = ((registers_ext_t*)regs)->esp3;

            set_kernel_esp(next_thread->kernel_esp);

            /* set CR0.TS (bit 3) so the next FPU instruction traps into thread_lazy_fpu() */
            asm volatile ("pushl %eax\n"
                          "movl %cr0, %eax\n"
                          "orb $0x08, %al\n"
                          "movl %eax, %cr0\n"
                          "popl %eax\n");

            if (SEGMENT_RPL(next_thread->state.regs.cs) != 0)
            {
                push_to_stack(&next_thread->kernel_esp, get_user_data_selector());
                push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            }

            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eflags);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.cs);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eip);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.error_code);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eax);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ecx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.data_selector);

            regs->esp = next_thread->kernel_esp;
            regs->error_code = CONTEXT_SWITCH_MAGIC;

            if (current_thread->owner_process != next_thread->owner_process)
            {
                set_page_directory(next_thread->owner_process->memory_space.page_directory);
            }
        }

        if (current_thread->owner_process != kernel_process)
        {
            bump_address_space(&current_thread->owner_process->memory_space);
        }

        if (current_thread->terminated) destroy_thread(current_thread);
        current_thread = next_thread;
        current_thread->quantum = QUANTUM;
    }
    else
    {
        current_thread->quantum--;
    }

    leave_critical(&critical);
}
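
/*
 * scheduler_wait() parks the calling thread on a condition tree until the
 * condition holds or the timeout (in milliseconds) expires. A minimal usage
 * sketch, assuming a hypothetical driver flag irq_fired:
 *
 *     wait_condition_t cond = { .type = WAIT_UNTIL_NOT_EQUAL, .pointer = &irq_fired, .value = 0 };
 *     wait_result_t result = scheduler_wait(&cond, 1000); // WAIT_CONDITION_HIT or WAIT_TIMED_OUT
 *
 * The condition must remain valid (e.g. on the waiter's stack) until the wait
 * returns, because is_thread_ready() evaluates it from scheduler context.
 */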

wait_result_t scheduler_wait(wait_condition_t *condition, dword_t timeout)
{
    if (test_condition(condition)) return WAIT_CONDITION_HIT;
    if (timeout == 0) return WAIT_TIMED_OUT;

    wait_t wait = { .root = condition, .timeout = timeout, .timestamp = syscall_get_milliseconds(), .result = WAIT_CANCELED };
    while (!__sync_bool_compare_and_swap(&current_thread->wait, NULL, &wait)) continue;
    syscall_yield_quantum();

    return wait.result;
}

sysret_t syscall_sleep(qword_t milliseconds)
{
    wait_condition_t condition = { .type = WAIT_ALWAYS };
    return scheduler_wait(&condition, milliseconds) == WAIT_CANCELED ? ERR_CANCELED : ERR_SUCCESS;
}

sysret_t syscall_yield_quantum()
{
    current_thread->quantum = 0;
    reschedule();
    return ERR_SUCCESS;
}

dword_t create_system_thread(thread_procedure_t routine, dword_t flags, priority_t priority, dword_t stack_size, void *param, thread_t **new_thread)
{
    thread_state_t initial_state;
    memset(&initial_state, 0, sizeof(initial_state));

    if (!stack_size) stack_size = KERNEL_STACK_SIZE;

    void *kernel_stack = malloc(stack_size + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL) return ERR_NOMEMORY;

    dword_t ret = commit_pages(kernel_stack, stack_size);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        return ret;
    }

    initial_state.regs.eip = (dword_t)routine;
    initial_state.regs.esp = ((dword_t)kernel_stack + stack_size + 3) & ~3;

    push_to_stack((uintptr_t*)&initial_state.regs.esp, (uintptr_t)param);
    push_to_stack((uintptr_t*)&initial_state.regs.esp, 0); /* fake return address so routine sees param as its first argument */

    ret = create_thread_internal(kernel_process, &initial_state, flags, priority, kernel_stack, new_thread);
    if (ret != ERR_SUCCESS) free(kernel_stack);
    return ret;
}
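
/*
 * Usage sketch for create_system_thread(); worker_main and its argument are
 * hypothetical. A stack_size of 0 selects KERNEL_STACK_SIZE:
 *
 *     static void worker_main(void *param);
 *
 *     thread_t *worker;
 *     dword_t ret = create_system_thread(worker_main, 0, THREAD_PRIORITY_MID, 0, NULL, &worker);
 *     if (ret != ERR_SUCCESS) { ... }
 */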

sysret_t syscall_create_thread(handle_t process, thread_state_t *initial_state, dword_t flags, priority_t priority, handle_t *new_thread)
{
    dword_t ret;
    thread_state_t safe_state;
    process_t *proc;
    thread_t *thread;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(initial_state, sizeof(*initial_state))) return ERR_BADPTR;
        if (!check_usermode(new_thread, sizeof(*new_thread))) return ERR_BADPTR;

        EH_TRY safe_state = *initial_state;
        EH_CATCH EH_ESCAPE(return ERR_BADPTR);
        EH_DONE;
    }
    else
    {
        safe_state = *initial_state;
    }

    if (process != INVALID_HANDLE)
    {
        if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
    }
    else
    {
        proc = get_current_process();
        reference(&proc->header);
    }

    if (get_previous_mode() == USER_MODE && proc == kernel_process)
    {
        ret = ERR_FORBIDDEN;
        goto cleanup;
    }

    void *kernel_stack = malloc(KERNEL_STACK_SIZE + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    ret = commit_pages(kernel_stack, KERNEL_STACK_SIZE);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        goto cleanup;
    }

    ret = create_thread_internal(proc, &safe_state, flags, priority, kernel_stack, &thread);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        goto cleanup;
    }

    handle_t thread_handle;
    ret = open_object(&thread->header, 0, &thread_handle);
    if (ret == ERR_SUCCESS)
    {
        EH_TRY *new_thread = thread_handle;
        EH_CATCH syscall_close_object(thread_handle);
        EH_DONE;
    }

cleanup:
    dereference(&proc->header);
    return ret;
}

sysret_t syscall_open_thread(dword_t tid, handle_t *handle)
{
    int i;
    thread_t *thread = NULL;
    dword_t ret = ERR_NOTFOUND;
    critical_t critical;
    enter_critical(&critical);

    if (current_thread->tid == tid)
    {
        thread = current_thread;
    }
    else
    {
        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;

            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *entry = CONTAINER_OF(ptr, thread_t, in_queue_list);

                if (entry->tid == tid)
                {
                    thread = entry;
                    goto found;
                }
            }
        }
    }

found:
    if (thread != NULL) ret = open_object(&thread->header, 0, handle);

    leave_critical(&critical);
    return ret;
}

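/*
 * Terminating another thread is a two-step handshake: mark it terminating so
 * the scheduler stops selecting it, wait until it has left kernel mode
 * (in_kernel == 0), and only then mark it terminated and tear it down, so a
 * thread is never destroyed while it still holds kernel state. Terminating
 * the current thread just marks it and yields; the scheduler destroys it on
 * the next pass.
 */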
dword_t terminate_thread_internal(thread_t *thread, dword_t exit_code)
{
    critical_t critical;
    thread->terminating = TRUE;

    if (thread != current_thread)
    {
        wait_condition_t cond = { .type = WAIT_UNTIL_EQUAL, .pointer = (dword_t*)&thread->in_kernel, .value = 0 };
        wait_result_t result = scheduler_wait(&cond, NO_TIMEOUT);
        if (result == WAIT_CANCELED) return ERR_CANCELED;
    }

    enter_critical(&critical);

    thread->exit_code = exit_code;
    thread->terminated = TRUE;
    if (thread != current_thread) destroy_thread(thread);

    leave_critical(&critical);

    if (thread == current_thread) syscall_yield_quantum();
    return ERR_SUCCESS;
}

sysret_t syscall_terminate_thread(handle_t handle, dword_t exit_code)
{
    dword_t ret;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    ret = terminate_thread_internal(thread, exit_code);
    dereference(&thread->header); /* release the reference taken above (not reached when terminating the calling thread) */
    return ret;
}

sysret_t syscall_query_thread(handle_t handle, thread_info_t info_type, void *buffer, size_t size)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;
    void *safe_buffer;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(buffer, size)) return ERR_BADPTR;

        safe_buffer = malloc(size);
        if (safe_buffer == NULL) return ERR_NOMEMORY;
        memset(safe_buffer, 0, size);
    }
    else
    {
        safe_buffer = buffer;
    }

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
        {
            if (get_previous_mode() == USER_MODE) free(safe_buffer);
            return ERR_INVALID;
        }
    }

    switch (info_type)
    {
        case THREAD_TID_INFO:
            if (size >= sizeof(dword_t)) *((dword_t*)safe_buffer) = thread->tid;
            else ret = ERR_SMALLBUF;
            break;

        case THREAD_FROZEN_INFO:
            if (size >= sizeof(int32_t)) *((int32_t*)safe_buffer) = thread->frozen;
            else ret = ERR_SMALLBUF;
            break;

        case THREAD_CPU_STATE_INFO:
            if (size >= sizeof(thread_state_t))
            {
                if (current_thread->tid != thread->tid)
                {
                    *((thread_state_t*)safe_buffer) = thread->state;
                }
                else
                {
                    ((thread_state_t*)safe_buffer)->regs = *thread->last_context;
                    fpu_save(((thread_state_t*)safe_buffer)->fpu_state);
                }
            }
            else
            {
                ret = ERR_SMALLBUF;
            }

            break;

        case THREAD_PRIORITY_INFO:
            if (size >= sizeof(priority_t)) *((priority_t*)safe_buffer) = thread->priority;
            else ret = ERR_SMALLBUF;
            break;

        case THREAD_AFFINITY_INFO:
            if (size >= sizeof(affinity_t)) *((affinity_t*)safe_buffer) = thread->affinity;
            else ret = ERR_SMALLBUF;
            break;

        default:
            ret = ERR_INVALID;
    }

    if (get_previous_mode() == USER_MODE)
    {
        EH_TRY memcpy(buffer, safe_buffer, size);
        EH_CATCH ret = ERR_BADPTR;
        EH_DONE;

        free(safe_buffer);
    }

    dereference(&thread->header);
    return ret;
}

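/*
 * Writing THREAD_CPU_STATE_INFO targets one of two images: if the thread is
 * not inside the kernel, the saved thread->state.regs is patched; if it is,
 * the registers go into last_context, the trap frame restored when it exits
 * the kernel. Either way, only the bits in SAFE_EFLAGS_MASK are writable in
 * eflags, so a caller cannot grant a thread IOPL or disable interrupts.
 */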
sysret_t syscall_set_thread(handle_t handle, thread_info_t info_type, const void *buffer, size_t size)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;
    void *safe_buffer;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(buffer, size)) return ERR_BADPTR;

        safe_buffer = malloc(size);
        if (safe_buffer == NULL) return ERR_NOMEMORY;

        EH_TRY memcpy(safe_buffer, buffer, size);
        EH_CATCH
        {
            free(safe_buffer);
            EH_ESCAPE(return ERR_BADPTR);
        }
        EH_DONE;
    }
    else
    {
        safe_buffer = (void*)buffer;
    }

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
        {
            if (get_previous_mode() == USER_MODE) free(safe_buffer);
            return ERR_INVALID;
        }
    }

    switch (info_type)
    {
    case THREAD_CPU_STATE_INFO:
        if (size >= sizeof(thread_state_t))
        {
            if (thread->owner_process->pid == kernel_process->pid)
            {
                ret = ERR_FORBIDDEN;
                break;
            }

            thread_state_t *new_state = (thread_state_t*)safe_buffer;

            critical_t critical;
            if (current_thread->tid != thread->tid) enter_critical(&critical);

            if (thread->in_kernel == 0)
            {
                thread->state.regs.eax = new_state->regs.eax;
                thread->state.regs.ecx = new_state->regs.ecx;
                thread->state.regs.edx = new_state->regs.edx;
                thread->state.regs.ebx = new_state->regs.ebx;
                thread->state.regs.esp = new_state->regs.esp;
                thread->state.regs.ebp = new_state->regs.ebp;
                thread->state.regs.esi = new_state->regs.esi;
                thread->state.regs.edi = new_state->regs.edi;
                thread->state.regs.eip = new_state->regs.eip;
                thread->state.regs.eflags = (thread->state.regs.eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
            }
            else if (thread->last_context)
            {
                thread->last_context->eax = new_state->regs.eax;
                thread->last_context->ecx = new_state->regs.ecx;
                thread->last_context->edx = new_state->regs.edx;
                thread->last_context->ebx = new_state->regs.ebx;
                thread->last_context->esp = new_state->regs.esp;
                thread->last_context->ebp = new_state->regs.ebp;
                thread->last_context->esi = new_state->regs.esi;
                thread->last_context->edi = new_state->regs.edi;
                thread->last_context->eip = new_state->regs.eip;
                thread->last_context->eflags = (thread->last_context->eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
            }

            if (current_thread->tid != thread->tid)
            {
                memcpy(thread->state.fpu_state, new_state->fpu_state, sizeof(thread->state.fpu_state));
                leave_critical(&critical);
            }
            else
            {
                fpu_restore(new_state->fpu_state);
            }
        }
        else
        {
            ret = ERR_SMALLBUF;
        }

        break;

    case THREAD_PRIORITY_INFO:
        if (size >= sizeof(priority_t)) thread->priority = *((priority_t*)safe_buffer);
        else ret = ERR_SMALLBUF;
        break;

    case THREAD_AFFINITY_INFO:
        if (size >= sizeof(affinity_t)) thread->affinity = *((affinity_t*)safe_buffer);
        else ret = ERR_SMALLBUF;
        break;

    default:
        ret = ERR_INVALID;
    }

    if (get_previous_mode() == USER_MODE) free(safe_buffer);
    dereference(&thread->header);
    return ret;
}

sysret_t syscall_wait_thread(handle_t handle, dword_t timeout)
{
    dword_t ret;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    wait_condition_t condition = { .type = WAIT_UNTIL_NOT_EQUAL, .pointer = &thread->terminated, .value = FALSE };
    ret = scheduler_wait(&condition, timeout);

    dereference(&thread->header);
    return ret;
}

sysret_t syscall_freeze_thread(handle_t handle)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    thread->frozen++;

    dereference(&thread->header);
    return ret;
}

sysret_t syscall_thaw_thread(handle_t handle)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    thread->frozen--;

    dereference(&thread->header);
    return ret;
}

void thread_init(void)
{
    int i;
    critical_t critical;

    memset(tid_alloc_bitmap, 0, sizeof(tid_alloc_bitmap));
    set_bit(tid_alloc_bitmap, 0);

    thread_t *main_thread = (thread_t*)malloc(sizeof(thread_t));
    if (main_thread == NULL) KERNEL_CRASH("Cannot allocate thread object");

    init_object(&main_thread->header, NULL, OBJECT_THREAD);

    if (create_object(&main_thread->header) != ERR_SUCCESS)
    {
        KERNEL_CRASH("Cannot initialize thread object");
    }

    main_thread->tid = 0;
    main_thread->priority = THREAD_PRIORITY_MID;
    main_thread->kernel_stack = malloc(KERNEL_STACK_SIZE);
    ASSERT(main_thread->kernel_stack != NULL);
    commit_pages(main_thread->kernel_stack, KERNEL_STACK_SIZE);
    main_thread->kernel_esp = ((uintptr_t)main_thread->kernel_stack + KERNEL_STACK_SIZE) & ~3;
    set_kernel_esp(main_thread->kernel_esp);
    main_thread->exit_code = 0;
    main_thread->quantum = 0;
    main_thread->running_ticks = 0ULL;
    main_thread->owner_process = kernel_process;
    list_append(&kernel_process->threads, &main_thread->in_process_list);
    main_thread->in_kernel = 1;
    main_thread->last_context = NULL;
    main_thread->frozen = 0;
    main_thread->terminating = FALSE;
    main_thread->terminated = FALSE;
    main_thread->previous_mode = KERNEL_MODE;
    main_thread->wait = NULL;

    memset(&main_thread->kernel_handler, 0, sizeof(main_thread->kernel_handler));
    memset(&main_thread->user_handler, 0, sizeof(main_thread->user_handler));

    enter_critical(&critical);

    current_thread = main_thread;
    for (i = 0; i < THREAD_PRIORITY_MAX; i++) list_init(&thread_queue[i]);
    scheduler_enabled = TRUE;

    leave_critical(&critical);
}