7f17d3b317ab3f00ad103c2a0c540042ef83309e
[monolithium.git] / kernel / src / thread.c
1 /*
2  * thread.c
3  *
4  * Copyright (C) 2016 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
5  *
6  * This program is free software: you can redistribute it and/or modify
7  * it under the terms of the GNU Affero General Public License as
8  * published by the Free Software Foundation, either version 3 of the
9  * License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU Affero General Public License for more details.
15  *
16  * You should have received a copy of the GNU Affero General Public License
17  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include <thread.h>
21 #include <timer.h>
22 #include <process.h>
23 #include <exception.h>
24 #include <syscalls.h>
25 #include <segments.h>
26 #include <heap.h>
27 #include <cpu.h>
28
29 extern void reschedule(void);
30
31 bool_t scheduler_enabled = FALSE;
32 static list_entry_t thread_queue[THREAD_PRIORITY_MAX];
33 static thread_t *current_thread = NULL;
34 static dword_t tid_alloc_bitmap[MAX_THREADS / 32];
35 static lock_t tid_bitmap_lock = 0;
36
37 static dword_t alloc_tid()
38 {
39     int i;
40     dword_t tid = (dword_t)-1;
41
42     acquire_lock(&tid_bitmap_lock);
43
44     for (i = 0; i < MAX_THREADS; i++)
45     {
46         if (!test_bit(tid_alloc_bitmap, i))
47         {
48             tid = i;
49             set_bit(tid_alloc_bitmap, i);
50             break;
51         }
52     }
53
54     release_lock(&tid_bitmap_lock);
55     return tid;
56 }
57
58 static inline bool_t test_condition(wait_condition_t condition, dword_t *pointer, dword_t value)
59 {
60     bool_t satisfied;
61
62     switch (condition)
63     {
64     case WAIT_NEVER:
65         satisfied = TRUE;
66         break;
67
68     case WAIT_ALWAYS:
69         satisfied = FALSE;
70         break;
71
72     case WAIT_UNTIL_EQUAL:
73         satisfied = (*pointer == value);
74         break;
75
76     case WAIT_UNTIL_NOT_EQUAL:
77         satisfied = (*pointer != value);
78         break;
79
80     case WAIT_UNTIL_LESS:
81         satisfied = (*pointer < value);
82         break;
83
84     case WAIT_UNTIL_NOT_LESS:
85         satisfied = (*pointer >= value);
86         break;
87
88     case WAIT_UNTIL_GREATER:
89         satisfied = (*pointer > value);
90         break;
91
92     case WAIT_UNTIL_NOT_GREATER:
93         satisfied = (*pointer <= value);
94         break;
95
96     default:
97         KERNEL_CRASH("Invalid wait condition value");
98         break;
99     }
100
101     return satisfied;
102 }
103
104 static inline bool_t is_thread_ready(thread_t *thread)
105 {
106     qword_t current_time = get_milliseconds();
107
108     if (thread->terminated) return FALSE;
109     if (thread->frozen > 0 && !thread->syscall_lock) return FALSE;
110
111     if (test_condition(thread->wait_condition, thread->wait_pointer, thread->wait_value))
112     {
113         thread->wait_condition = WAIT_NEVER;
114         thread->wait_result = WAIT_CONDITION_HIT;
115         return TRUE;
116     }
117
118     if (thread->wait_timeout != 0ULL && (current_time - thread->wait_timestamp) >= (qword_t)thread->wait_timeout)
119     {
120         thread->wait_condition = WAIT_NEVER;
121         thread->wait_result = WAIT_TIMED_OUT;
122         return TRUE;
123     }
124
125     if (thread->cancel_io)
126     {
127         thread->wait_condition = WAIT_NEVER;
128         thread->wait_result = WAIT_CANCELED;
129         return TRUE;
130     }
131
132     return FALSE;
133 }
134
/* Tear down a thread object: unlink it from the scheduler queue and its
 * owning process, free its kernel stack, and drop the creation reference.
 * If this was the process's last thread, the process is destroyed too.
 * NOTE(review): in_queue_list is removed without entering a critical
 * section here — callers appear to guarantee exclusion (scheduler runs
 * inside enter_critical; terminate_thread_internal wraps this call in
 * one). Confirm before adding new call sites. */
static void destroy_thread(thread_t *thread)
{
    list_remove(&thread->in_queue_list);

    /* Unlink from the owner process under its thread-list resource. */
    acquire_resource_exclusive(&thread->owner_process->thread_list_res);
    list_remove(&thread->in_process_list);
    release_resource(&thread->owner_process->thread_list_res);

    free(thread->kernel_stack);
    thread->kernel_stack = NULL;

    /* Empty circular list (head points to itself) => last thread gone;
     * destroy the now-threadless process. */
    if (thread->owner_process->threads.next == &thread->owner_process->threads)
    {
        destroy_process(thread->owner_process);
    }

    /* Drop the reference created at thread creation; may free *thread. */
    dereference(&thread->header);
}
153
154 dword_t create_thread_internal(process_t *proc, thread_state_t *initial_state, dword_t flags, priority_t priority, void *kernel_stack, thread_t **new_thread)
155 {
156     dword_t ret;
157     if (proc->terminating) return ERR_CANCELED;
158
159     thread_t *thread = (thread_t*)malloc(sizeof(thread_t));
160     if (thread == NULL) return ERR_NOMEMORY;
161
162     thread->header.name = NULL;
163     thread->header.type = OBJECT_THREAD;
164
165     ret = create_object(&thread->header);
166     if (ret != ERR_SUCCESS)
167     {
168         free(thread);
169         return ret;
170     }
171
172     thread->tid = alloc_tid();
173     if (thread->tid == (dword_t)-1)
174     {
175         ret = ERR_NOMEMORY;
176         goto cleanup;
177     }
178
179     thread->priority = priority;
180     thread->quantum = QUANTUM;
181     thread->frozen = (flags & THREAD_CREATE_FROZEN) ? TRUE : FALSE;
182     thread->running_ticks = 0ULL;
183     thread->owner_process = proc;
184     thread->exit_code = 0;
185     thread->terminated = FALSE;
186     thread->cancel_io = FALSE;
187     thread->syscall_lock = 0;
188     thread->wait_condition = WAIT_NEVER;
189     thread->wait_timestamp = 0ULL;
190     thread->wait_timeout = 0;
191     thread->wait_pointer = NULL;
192     thread->wait_value = 0;
193     memset(&thread->kernel_handler, 0, sizeof(thread->kernel_handler));
194     memset(&thread->user_handler, 0, sizeof(thread->user_handler));
195
196     thread->state = *initial_state;
197     thread->state.regs.eflags = 0x202;
198
199     if (proc != kernel_process)
200     {
201         thread->previous_mode = USER_MODE;
202
203         thread->state.regs.cs = get_user_code_selector();
204         thread->state.regs.data_selector = get_user_data_selector();
205     }
206     else
207     {
208         thread->previous_mode = KERNEL_MODE;
209
210         thread->state.regs.cs = get_kernel_code_selector();
211         thread->state.regs.data_selector = get_kernel_data_selector();
212     }
213
214     thread->kernel_stack = kernel_stack;
215     thread->kernel_esp = ((uintptr_t)thread->kernel_stack + KERNEL_STACK_SIZE + 3) & ~3;
216
217     acquire_resource_exclusive(&thread->owner_process->thread_list_res);
218     list_append(&proc->threads, &thread->in_process_list);
219     release_resource(&thread->owner_process->thread_list_res);
220
221     critical_t critical;
222     enter_critical(&critical);
223     list_append(&thread_queue[priority], &thread->in_queue_list);
224     leave_critical(&critical);
225
226     *new_thread = thread;
227     ret = ERR_SUCCESS;
228
229 cleanup:
230     if (ret != ERR_SUCCESS)
231     {
232         if (thread->kernel_stack) free(thread->kernel_stack);
233         if (thread != NULL) dereference(&thread->header);
234
235         if (thread->tid != (dword_t)-1)
236         {
237             acquire_lock(&tid_bitmap_lock);
238             clear_bit(tid_alloc_bitmap, thread->tid);
239             release_lock(&tid_bitmap_lock);
240         }
241     }
242
243     return ret;
244 }
245
/* Return the thread currently executing on this CPU (NULL before
 * thread_init has run). */
thread_t *get_current_thread()
{
    return current_thread;
}
250
/* Return the TID of the currently running thread.
 * NOTE(review): dereferences current_thread unconditionally — must not be
 * called before thread_init installs the bootstrap thread. */
dword_t get_thread_id()
{
    return current_thread->tid;
}
255
/* Timer-tick scheduler entry point, called with the interrupted context in
 * *regs. Decrements the running thread's quantum; when it hits zero, picks
 * the first ready thread in priority order (lower index = higher priority)
 * and rewrites *regs so the interrupt return path resumes the new thread.
 * The context switch works by rebuilding the next thread's interrupt frame
 * on its own kernel stack and pointing regs->esp at it, with error_code set
 * to CONTEXT_SWITCH_MAGIC so the assembly exit path (reschedule/iret glue)
 * knows to switch stacks. Exact push order must mirror the frame layout the
 * interrupt stub pops — do not reorder. */
void scheduler(registers_t *regs)
{
    int i;
    critical_t critical;
    enter_critical(&critical);

    if (current_thread->quantum == 0)
    {
        /* Re-queue the outgoing thread so it can be picked again (possibly
         * immediately, if it is still the best ready candidate). */
        list_append(&thread_queue[current_thread->priority], &current_thread->in_queue_list);
        thread_t *next_thread = NULL;

        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;

            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *thread = CONTAINER_OF(ptr, thread_t, in_queue_list);

                if (is_thread_ready(thread))
                {
                    next_thread = thread;
                    goto found;
                }
            }
        }

        /* Fall-through with next_thread == NULL means no thread (not even
         * the current one) is ready — the ASSERT below crashes the kernel,
         * since an idle thread is expected to always be runnable. */
found:
        ASSERT(next_thread != NULL);
        list_remove(&next_thread->in_queue_list);

        if (current_thread != next_thread)
        {
            /* Save the outgoing thread's full context. */
            memcpy(&current_thread->state.regs, regs, sizeof(registers_t));
            if (cpu_features[0] & CPU_FEATURE_FPU) fpu_save(current_thread->state.fpu_state);

            current_thread->kernel_esp = regs->esp;
            /* The ring-3 esp lives in the extended frame, past registers_t. */
            current_thread->state.regs.esp = ((registers_ext_t*)regs)->esp3;

            set_kernel_esp(next_thread->kernel_esp);
            if (cpu_features[0] & CPU_FEATURE_FPU) fpu_restore(next_thread->state.fpu_state);

            /* For a user-mode (RPL != 0) thread, iret also pops ss:esp —
             * push those two first. */
            if (SEGMENT_RPL(next_thread->state.regs.cs) != 0)
            {
                push_to_stack(&next_thread->kernel_esp, get_user_data_selector());
                push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            }

            /* iret frame... */
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eflags);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.cs);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eip);
            /* ...then the software-saved portion popped by the stub. */
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.error_code);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eax);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ecx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.data_selector);

            regs->esp = next_thread->kernel_esp;
            regs->error_code = CONTEXT_SWITCH_MAGIC;

            /* Switch address spaces only when crossing processes. */
            if (current_thread->owner_process != next_thread->owner_process)
            {
                set_page_directory(next_thread->owner_process->memory_space.page_directory);
            }
        }

        if (current_thread->owner_process != kernel_process)
        {
            bump_address_space(&current_thread->owner_process->memory_space);
        }

        /* A terminated thread is reaped here, after its context is no
         * longer needed. destroy_thread also unlinks it from the queue it
         * was re-appended to above. */
        if (current_thread->terminated) destroy_thread(current_thread);
        current_thread = next_thread;
        current_thread->quantum = QUANTUM;
    }
    else
    {
        current_thread->quantum--;
    }

    leave_critical(&critical);
}
343
/* Block the current thread until the given condition on (*pointer, value)
 * becomes true, the timeout (in milliseconds) expires, or the wait is
 * canceled. Returns the wake-up reason (WAIT_CONDITION_HIT, WAIT_TIMED_OUT,
 * or WAIT_CANCELED). A timeout of 0 means "poll once"; NO_TIMEOUT disables
 * the deadline. Fast paths: condition already true, or zero timeout.
 * NOTE(review): test_condition takes dword_t* but pointer is uintptr_t* —
 * identical widths on this 32-bit target; revisit for a 64-bit port. */
wait_result_t scheduler_wait(wait_condition_t condition, dword_t timeout, uintptr_t *pointer, uintptr_t value)
{
    if (test_condition(condition, pointer, value)) return WAIT_CONDITION_HIT;
    if (timeout == 0) return WAIT_TIMED_OUT;

    /* Publish the wait parameters atomically w.r.t. the scheduler tick;
     * wait_condition is written last so a partially-filled wait can never
     * be observed as armed. */
    critical_t critical;
    enter_critical(&critical);

    if (timeout != NO_TIMEOUT)
    {
        current_thread->wait_timestamp = get_milliseconds();
        current_thread->wait_timeout = timeout;
    }
    else
    {
        current_thread->wait_timestamp = 0ULL;
        current_thread->wait_timeout = 0;
    }

    current_thread->wait_pointer = pointer;
    current_thread->wait_value = value;
    current_thread->wait_condition = condition;

    leave_critical(&critical);
    /* Give up the CPU; we resume here after is_thread_ready() fires and
     * wait_result has been filled in. */
    yield_quantum();

    return current_thread->wait_result;
}
372
/* Sleep unconditionally for the given number of milliseconds.
 * NOTE(review): the parameter is 64-bit but scheduler_wait's timeout is a
 * dword_t — values >= 2^32 are silently truncated (and a value that
 * truncates to NO_TIMEOUT would sleep forever). Confirm callers stay
 * within range. */
void sleep(qword_t milliseconds)
{
    scheduler_wait(WAIT_ALWAYS, milliseconds, NULL, 0);
}
377
/* Voluntarily give up the rest of the current time slice: zero the quantum
 * so the next scheduler pass performs a switch, then trigger it via the
 * reschedule assembly stub. */
void yield_quantum()
{
    current_thread->quantum = 0;
    reschedule();
}
383
384 dword_t create_system_thread(thread_procedure_t routine, dword_t flags, priority_t priority, dword_t stack_size, void *param, thread_t **new_thread)
385 {
386     thread_state_t initial_state;
387     memset(&initial_state, 0, sizeof(initial_state));
388
389     if (!stack_size) stack_size = KERNEL_STACK_SIZE;
390
391     void *kernel_stack = malloc(stack_size + sizeof(uintptr_t) - 1);
392     if (kernel_stack == NULL) return ERR_NOMEMORY;
393
394     dword_t ret = commit_pages(kernel_stack, stack_size);
395     if (ret != ERR_SUCCESS)
396     {
397         free(kernel_stack);
398         return ret;
399     }
400
401     initial_state.regs.eip = (dword_t)routine;
402     initial_state.regs.esp = ((dword_t)kernel_stack + stack_size + 3) & ~3;
403
404     push_to_stack((uintptr_t*)&initial_state.regs.esp, (uintptr_t)param);
405
406     return create_thread_internal(kernel_process, &initial_state, flags, priority, kernel_stack, new_thread);
407 }
408
409 dword_t create_thread(handle_t process, thread_state_t *initial_state, dword_t flags, priority_t priority, handle_t *new_thread)
410 {
411     dword_t ret;
412     thread_state_t safe_state;
413     process_t *proc;
414     thread_t *thread;
415
416     if (get_previous_mode() == USER_MODE)
417     {
418         if (!check_usermode(initial_state, sizeof(initial_state))) return ERR_BADPTR;
419         if (!check_usermode(new_thread, sizeof(*new_thread))) return ERR_BADPTR;
420
421         EH_TRY safe_state = *initial_state;
422         EH_CATCH EH_ESCAPE(return ERR_BADPTR);
423         EH_DONE;
424     }
425     else
426     {
427         safe_state = *initial_state;
428     }
429
430     if (process != INVALID_HANDLE)
431     {
432         if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
433     }
434     else
435     {
436         proc = get_current_process();
437         reference(&proc->header);
438     }
439
440     if (get_previous_mode() == USER_MODE && proc == kernel_process)
441     {
442         ret = ERR_FORBIDDEN;
443         goto cleanup;
444     }
445
446     void *kernel_stack = malloc(KERNEL_STACK_SIZE + sizeof(uintptr_t) - 1);
447     if (kernel_stack == NULL)
448     {
449         ret = ERR_NOMEMORY;
450         goto cleanup;
451     }
452
453     ret = commit_pages(kernel_stack, KERNEL_STACK_SIZE);
454     if (ret != ERR_SUCCESS) goto cleanup;
455
456     ret = create_thread_internal(proc, &safe_state, flags, priority, kernel_stack, &thread);
457     if (ret != ERR_SUCCESS)
458     {
459         free(kernel_stack);
460         goto cleanup;
461     }
462
463     handle_t thread_handle;
464     ret = open_object(&thread->header, 0, &thread_handle);
465
466     EH_TRY *new_thread = thread_handle;
467     EH_CATCH close_object(thread_handle);
468     EH_DONE;
469
470 cleanup:
471     dereference(&proc->header);
472     return ret;
473 }
474
475 dword_t open_thread(dword_t tid, handle_t *handle)
476 {
477     int i;
478     thread_t *thread = NULL;
479     dword_t ret = ERR_NOTFOUND;
480     critical_t critical;
481     enter_critical(&critical);
482
483     if (get_thread_id() == tid)
484     {
485         thread = current_thread;
486     }
487     else
488     {
489         for (i = 0; i < THREAD_PRIORITY_MAX; i++)
490         {
491             list_entry_t *ptr = thread_queue[i].next;
492
493             for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
494             {
495                 thread_t *entry = CONTAINER_OF(ptr, thread_t, in_queue_list);
496
497                 if (entry->tid == tid)
498                 {
499                     thread = entry;
500                     goto found;
501                 }
502             }
503         }
504     }
505
506 found:
507     if (thread != NULL) ret = open_object(&thread->header, 0, handle);
508     else ret = ERR_NOTFOUND;
509
510     leave_critical(&critical);
511     return ret;
512 }
513
/* Terminate the given thread with exit_code. Cancels any pending I/O,
 * marks the thread terminated, and either destroys it immediately (foreign
 * thread) or yields so the scheduler reaps it (self-termination).
 * NOTE(review): for a foreign thread, syscall_lock is acquired to wait out
 * an in-flight syscall but is never released — presumably fine because the
 * thread object is destroyed, but confirm destroy_thread's interaction
 * with held locks. The caller's reference (taken by terminate_thread) is
 * consumed here via destroy_thread's dereference — do not add another
 * dereference without auditing that pairing. */
dword_t terminate_thread_internal(thread_t *thread, dword_t exit_code)
{
    critical_t critical;
    /* Wake the thread out of any wait with WAIT_CANCELED. */
    thread->cancel_io = TRUE;

    /* For a foreign thread, block until it is not inside a syscall. */
    if (thread != current_thread) acquire_lock(&thread->syscall_lock);
    enter_critical(&critical);

    thread->exit_code = exit_code;
    thread->terminated = TRUE;
    if (thread != current_thread) destroy_thread(thread);

    leave_critical(&critical);

    /* Self-termination: give up the CPU; the scheduler reaps us on the
     * next pass (current_thread->terminated is set). Never returns to the
     * terminated thread's caller in that case. */
    if (thread == current_thread) yield_quantum();
    return ERR_SUCCESS;
}
531
532 dword_t terminate_thread(handle_t handle, dword_t exit_code)
533 {
534     thread_t *thread;
535
536     if (handle == INVALID_HANDLE)
537     {
538         thread = get_current_thread();
539         reference(&thread->header);
540     }
541     else
542     {
543         if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
544     }
545
546     return terminate_thread_internal(thread, exit_code);
547 }
548
549 dword_t query_thread(handle_t handle, thread_info_t info_type, void *buffer, size_t size)
550 {
551     dword_t ret = ERR_SUCCESS;
552     thread_t *thread;
553     void *safe_buffer;
554
555     if (get_previous_mode() == USER_MODE)
556     {
557         if (!check_usermode(buffer, size)) return ERR_BADPTR;
558
559         safe_buffer = malloc(size);
560         if (safe_buffer == NULL) return ERR_NOMEMORY;
561         memset(safe_buffer, 0, size);
562     }
563     else
564     {
565         safe_buffer = buffer;
566     }
567
568     if (handle == INVALID_HANDLE)
569     {
570         thread = get_current_thread();
571         reference(&thread->header);
572     }
573     else
574     {
575         if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
576     }
577
578     switch (info_type)
579     {
580         case THREAD_TID_INFO:
581             if (size >= sizeof(dword_t)) *((dword_t*)safe_buffer) = thread->tid;
582             else ret = ERR_SMALLBUF;
583             break;
584
585         case THREAD_FROZEN_INFO:
586             if (size >= sizeof(int32_t)) *((int32_t*)safe_buffer) = thread->frozen;
587             else ret = ERR_SMALLBUF;
588             break;
589
590         case THREAD_CPU_STATE_INFO:
591             if (size >= sizeof(thread_state_t))
592             {
593                 if (current_thread->tid != thread->tid)
594                 {
595                     *((thread_state_t*)safe_buffer) = thread->state;
596                 }
597                 else
598                 {
599                     ((thread_state_t*)safe_buffer)->regs = *thread->syscall_regs;
600                     fpu_save(((thread_state_t*)safe_buffer)->fpu_state);
601                 }
602             }
603             else
604             {
605                 ret = ERR_SMALLBUF;
606             }
607
608             break;
609
610         case THREAD_PRIORITY_INFO:
611             if (size >= sizeof(priority_t)) *((priority_t*)safe_buffer) = thread->priority;
612             else ret = ERR_SMALLBUF;
613             break;
614
615         case THREAD_AFFINITY_INFO:
616             if (size >= sizeof(affinity_t)) *((affinity_t*)safe_buffer) = thread->affinity;
617             else ret = ERR_SMALLBUF;
618
619         default:
620             ret = ERR_INVALID;
621     }
622
623     if (get_previous_mode() == USER_MODE)
624     {
625         EH_TRY memcpy(buffer, safe_buffer, size);
626         EH_CATCH ret = ERR_BADPTR;
627         EH_DONE;
628
629         free(safe_buffer);
630     }
631
632     dereference(&thread->header);
633     return ret;
634 }
635
636 dword_t set_thread(handle_t handle, thread_info_t info_type, const void *buffer, size_t size)
637 {
638     dword_t ret;
639     thread_t *thread;
640     void *safe_buffer;
641
642     if (get_previous_mode() == USER_MODE)
643     {
644         if (!check_usermode(buffer, size)) return ERR_BADPTR;
645
646         safe_buffer = malloc(size);
647         if (safe_buffer == NULL) return ERR_NOMEMORY;
648
649         EH_TRY memcpy(safe_buffer, buffer, size);
650         EH_CATCH ret = ERR_BADPTR;
651         EH_DONE;
652     }
653     else
654     {
655         safe_buffer = (void*)buffer;
656     }
657
658     if (handle == INVALID_HANDLE)
659     {
660         thread = get_current_thread();
661         reference(&thread->header);
662     }
663     else
664     {
665         if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
666         {
667             if (get_previous_mode() == USER_MODE) free(safe_buffer);
668             return ERR_INVALID;
669         }
670     }
671
672     switch (info_type)
673     {
674     case THREAD_CPU_STATE_INFO:
675         if (size >= sizeof(thread_state_t))
676         {
677             if (thread->owner_process->pid == kernel_process->pid) return ERR_FORBIDDEN;
678             thread_state_t *new_state = (thread_state_t*)safe_buffer;
679
680             critical_t critical;
681             if (current_thread->tid != thread->tid) enter_critical(&critical);
682
683             if (thread->syscall_lock == 0)
684             {
685                 thread->state.regs.eax = new_state->regs.eax;
686                 thread->state.regs.ecx = new_state->regs.ecx;
687                 thread->state.regs.edx = new_state->regs.edx;
688                 thread->state.regs.ebx = new_state->regs.ebx;
689                 thread->state.regs.esp = new_state->regs.esp;
690                 thread->state.regs.ebp = new_state->regs.ebp;
691                 thread->state.regs.esi = new_state->regs.esi;
692                 thread->state.regs.edi = new_state->regs.edi;
693                 thread->state.regs.eip = new_state->regs.eip;
694                 thread->state.regs.eflags = (thread->state.regs.eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
695             }
696             else if (thread->syscall_regs != NULL)
697             {
698                 thread->syscall_regs->eax = new_state->regs.eax;
699                 thread->syscall_regs->ecx = new_state->regs.ecx;
700                 thread->syscall_regs->edx = new_state->regs.edx;
701                 thread->syscall_regs->ebx = new_state->regs.ebx;
702                 thread->syscall_regs->esp = new_state->regs.esp;
703                 thread->syscall_regs->ebp = new_state->regs.ebp;
704                 thread->syscall_regs->esi = new_state->regs.esi;
705                 thread->syscall_regs->edi = new_state->regs.edi;
706                 thread->syscall_regs->eip = new_state->regs.eip;
707                 thread->syscall_regs->eflags = (thread->state.regs.eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
708             }
709
710
711             if (current_thread->tid != thread->tid)
712             {
713                 memcpy(thread->state.fpu_state, new_state->fpu_state, sizeof(thread->state.fpu_state));
714             }
715             else
716             {
717                 fpu_restore(new_state->fpu_state);
718             }
719
720             if (current_thread->tid != thread->tid) leave_critical(&critical);
721         }
722         else
723         {
724             ret = ERR_SMALLBUF;
725         }
726
727         break;
728
729     case THREAD_PRIORITY_INFO:
730         if (size >= sizeof(priority_t)) thread->priority = *((priority_t*)safe_buffer);
731         else ret = ERR_SMALLBUF;
732
733     case THREAD_AFFINITY_INFO:
734         if (size >= sizeof(affinity_t)) thread->affinity = *((affinity_t*)safe_buffer);
735         else ret = ERR_SMALLBUF;
736         break;
737
738     default:
739         ret = ERR_INVALID;
740     }
741
742     if (get_previous_mode() == USER_MODE) free(safe_buffer);
743     dereference(&thread->header);
744     return ret;
745 }
746
/* Block until the designated thread (INVALID_HANDLE = calling thread —
 * which can only ever time out) terminates, or the timeout expires.
 * Returns the scheduler_wait result. The reference taken here keeps the
 * thread object (and thus &thread->terminated) alive for the duration of
 * the wait even if the thread is destroyed meanwhile.
 * NOTE(review): passes &thread->terminated where scheduler_wait expects
 * uintptr_t* — assumes bool_t has pointer width on this target; confirm. */
dword_t wait_thread(handle_t handle, dword_t timeout)
{
    dword_t ret;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    /* Wake when terminated flips away from FALSE. */
    ret = scheduler_wait(WAIT_UNTIL_NOT_EQUAL, timeout, &thread->terminated, FALSE);

    dereference(&thread->header);
    return ret;
}
767
768 dword_t freeze_thread(handle_t handle)
769 {
770     dword_t ret = ERR_SUCCESS;
771     thread_t *thread;
772
773     if (handle == INVALID_HANDLE)
774     {
775         thread = get_current_thread();
776         reference(&thread->header);
777     }
778     else
779     {
780         if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
781     }
782
783     thread->frozen++;
784
785     dereference(&thread->header);
786     return ret;
787 }
788
789 dword_t thaw_thread(handle_t handle)
790 {
791     dword_t ret = ERR_SUCCESS;
792     thread_t *thread;
793
794     if (handle == INVALID_HANDLE)
795     {
796         thread = get_current_thread();
797         reference(&thread->header);
798     }
799     else
800     {
801         if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
802     }
803
804     thread->frozen--;
805
806     dereference(&thread->header);
807     return ret;
808 }
809
810 void thread_init(void)
811 {
812     int i;
813     critical_t critical;
814
815     memset(tid_alloc_bitmap, 0, sizeof(tid_alloc_bitmap));
816     set_bit(tid_alloc_bitmap, 0);
817
818     thread_t *main_thread = (thread_t*)malloc(sizeof(thread_t));
819     if (main_thread == NULL) KERNEL_CRASH("Cannot allocate thread object");
820
821     main_thread->header.name = NULL;
822     main_thread->header.type = OBJECT_THREAD;
823
824     if (create_object(&main_thread->header) != ERR_SUCCESS)
825     {
826         KERNEL_CRASH("Cannot initialize thread object");
827     }
828
829     main_thread->tid = 0;
830     main_thread->priority = THREAD_PRIORITY_MID;
831     main_thread->kernel_stack = malloc(KERNEL_STACK_SIZE);
832     ASSERT(main_thread->kernel_stack != NULL);
833     commit_pages(main_thread->kernel_stack, KERNEL_STACK_SIZE);
834     main_thread->kernel_esp = ((uintptr_t)main_thread->kernel_stack + KERNEL_STACK_SIZE) & ~3;
835     set_kernel_esp(main_thread->kernel_esp);
836     main_thread->exit_code = 0;
837     main_thread->quantum = 0;
838     main_thread->running_ticks = 0ULL;
839     main_thread->owner_process = kernel_process;
840     list_append(&kernel_process->threads, &main_thread->in_process_list);
841     main_thread->syscall_lock = 0;
842     main_thread->terminated = FALSE;
843     main_thread->previous_mode = KERNEL_MODE;
844     main_thread->wait_condition = WAIT_NEVER;
845     main_thread->wait_timestamp = 0ULL;
846     main_thread->wait_timeout = 0;
847     main_thread->wait_pointer = NULL;
848     main_thread->wait_value = 0;
849
850     memset(&main_thread->kernel_handler, 0, sizeof(main_thread->kernel_handler));
851     memset(&main_thread->user_handler, 0, sizeof(main_thread->user_handler));
852
853     enter_critical(&critical);
854
855     current_thread = main_thread;
856     for (i = 0; i < THREAD_PRIORITY_MAX; i++) list_init(&thread_queue[i]);
857     scheduler_enabled = TRUE;
858
859     leave_critical(&critical);
860 }