a3fa995c888249b554ad5efcfc43eef033856e43
[monolithium.git] / kernel / src / thread.c
1 /*
2  * thread.c
3  *
4  * Copyright (C) 2018 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
5  *
6  * This program is free software: you can redistribute it and/or modify
7  * it under the terms of the GNU Affero General Public License as
8  * published by the Free Software Foundation, either version 3 of the
9  * License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU Affero General Public License for more details.
15  *
16  * You should have received a copy of the GNU Affero General Public License
17  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include <thread.h>
21 #include <timer.h>
22 #include <process.h>
23 #include <exception.h>
24 #include <syscalls.h>
25 #include <segments.h>
26 #include <heap.h>
27 #include <cpu.h>
28
/* Implemented elsewhere (assembly, presumably): forces a scheduler pass. */
extern void reschedule(void);

/* Flipped to TRUE at the end of thread_init() once queues are set up. */
bool_t scheduler_enabled = FALSE;
/* One FIFO queue of threads per priority level; scanned low index first. */
static list_entry_t thread_queue[THREAD_PRIORITY_MAX];
/* The thread currently executing; updated only by the scheduler. */
static thread_t *current_thread = NULL;
/* Owner of the FPU state currently loaded in hardware (lazy FPU switching). */
static thread_t *last_fpu_thread = NULL;
/* Bitmap of allocated TIDs, one bit per thread, guarded by tid_bitmap_lock. */
static dword_t tid_alloc_bitmap[MAX_THREADS / 32];
static DECLARE_LOCK(tid_bitmap_lock);
37
38 static dword_t alloc_tid()
39 {
40     int i;
41     dword_t tid = (dword_t)-1;
42
43     lock_acquire(&tid_bitmap_lock);
44
45     for (i = 0; i < MAX_THREADS; i++)
46     {
47         if (!test_bit(tid_alloc_bitmap, i))
48         {
49             tid = i;
50             set_bit(tid_alloc_bitmap, i);
51             break;
52         }
53     }
54
55     lock_release(&tid_bitmap_lock);
56     return tid;
57 }
58
59 static inline bool_t test_condition(wait_condition_t *condition)
60 {
61     wait_condition_t **ptr;
62
63     switch (condition->type)
64     {
65     case WAIT_GROUP_ANY:
66         for (ptr = condition->conditions; *ptr; ptr++) if (test_condition(*ptr)) return TRUE;
67         return FALSE;
68     case WAIT_GROUP_ALL:
69         for (ptr = condition->conditions; *ptr; ptr++) if (!test_condition(*ptr)) return FALSE;
70         return TRUE;
71     case WAIT_ALWAYS:
72         return FALSE;
73     case WAIT_UNTIL_EQUAL:
74         return *condition->pointer == condition->value;
75     case WAIT_UNTIL_NOT_EQUAL:
76         return (*condition->pointer != condition->value);
77     case WAIT_UNTIL_LESS:
78         return (*condition->pointer < condition->value);
79     case WAIT_UNTIL_NOT_LESS:
80         return (*condition->pointer >= condition->value);
81     case WAIT_UNTIL_GREATER:
82         return (*condition->pointer > condition->value);
83     case WAIT_UNTIL_NOT_GREATER:
84         return (*condition->pointer <= condition->value);
85     default:
86         KERNEL_CRASH("Invalid wait condition value");
87         return FALSE;
88     }
89 }
90
91 static inline bool_t is_thread_ready(thread_t *thread)
92 {
93     qword_t current_time = syscall_get_milliseconds();
94
95     if (thread->terminated) return FALSE;
96     if (thread->frozen > 0 && !thread->in_kernel) return FALSE;
97     if (!thread->wait) return TRUE;
98
99     if (test_condition(thread->wait->root))
100     {
101         thread->wait->result = WAIT_CONDITION_HIT;
102         thread->wait = NULL;
103         return TRUE;
104     }
105
106     if (thread->wait->timeout != NO_TIMEOUT && (current_time - thread->wait->timestamp) >= (qword_t)thread->wait->timeout)
107     {
108         thread->wait->result = WAIT_TIMED_OUT;
109         thread->wait = NULL;
110         return TRUE;
111     }
112
113     if (thread->terminating)
114     {
115         thread->wait->result = WAIT_CANCELED;
116         thread->wait = NULL;
117         return TRUE;
118     }
119
120     return FALSE;
121 }
122
123 static void destroy_thread(thread_t *thread)
124 {
125     list_remove(&thread->in_queue_list);
126
127     lock_acquire(&thread->owner_process->thread_list_lock);
128     list_remove(&thread->in_process_list);
129     lock_release(&thread->owner_process->thread_list_lock);
130
131     free(thread->kernel_stack);
132     thread->kernel_stack = NULL;
133
134     if (thread->owner_process->threads.next == &thread->owner_process->threads)
135     {
136         destroy_process(thread->owner_process);
137     }
138
139     dereference(&thread->header);
140 }
141
142 void thread_cleanup(object_t *obj)
143 {
144     if (CONTAINER_OF(obj, thread_t, header) == last_fpu_thread) last_fpu_thread = NULL;
145 }
146
147 dword_t thread_pre_wait(object_t *obj, void *parameter, wait_condition_t *condition)
148 {
149     thread_t *thread = (thread_t*)obj;
150     condition->type = WAIT_UNTIL_NOT_EQUAL;
151     condition->pointer = &thread->terminated;
152     condition->value = FALSE;
153     return ERR_SUCCESS;
154 }
155
156 dword_t create_thread_internal(process_t *proc, thread_state_t *initial_state, dword_t flags, priority_t priority, void *kernel_stack, thread_t **new_thread)
157 {
158     dword_t ret;
159     if (proc->terminating) return ERR_CANCELED;
160
161     thread_t *thread = (thread_t*)malloc(sizeof(thread_t));
162     if (thread == NULL) return ERR_NOMEMORY;
163
164     init_object(&thread->header, NULL, OBJECT_THREAD);
165
166     ret = create_object(&thread->header);
167     if (ret != ERR_SUCCESS)
168     {
169         free(thread);
170         return ret;
171     }
172
173     thread->tid = alloc_tid();
174     if (thread->tid == (dword_t)-1)
175     {
176         ret = ERR_NOMEMORY;
177         goto cleanup;
178     }
179
180     thread->priority = priority;
181     thread->quantum = QUANTUM;
182     thread->frozen = (flags & THREAD_CREATE_FROZEN) ? TRUE : FALSE;
183     thread->running_ticks = 0ULL;
184     thread->owner_process = proc;
185     thread->exit_code = 0;
186     thread->terminating = FALSE;
187     thread->terminated = FALSE;
188     thread->last_context = NULL;
189     thread->wait = NULL;
190     memset(&thread->kernel_handler, 0, sizeof(thread->kernel_handler));
191     memset(&thread->user_handler, 0, sizeof(thread->user_handler));
192
193     thread->state = *initial_state;
194     thread->state.regs.eflags = 0x202;
195
196     if (proc != kernel_process)
197     {
198         thread->previous_mode = USER_MODE;
199         thread->in_kernel = 0;
200         thread->state.regs.cs = get_user_code_selector();
201         thread->state.regs.data_selector = get_user_data_selector();
202     }
203     else
204     {
205         thread->previous_mode = KERNEL_MODE;
206
207         thread->state.regs.cs = get_kernel_code_selector();
208         thread->state.regs.data_selector = get_kernel_data_selector();
209     }
210
211     thread->kernel_stack = kernel_stack;
212     thread->kernel_esp = ((uintptr_t)thread->kernel_stack + KERNEL_STACK_SIZE + 3) & ~3;
213
214     lock_acquire(&thread->owner_process->thread_list_lock);
215     list_append(&proc->threads, &thread->in_process_list);
216     lock_release(&thread->owner_process->thread_list_lock);
217
218     critical_t critical;
219     enter_critical(&critical);
220     list_append(&thread_queue[priority], &thread->in_queue_list);
221     leave_critical(&critical);
222
223     *new_thread = thread;
224     ret = ERR_SUCCESS;
225
226 cleanup:
227     if (ret != ERR_SUCCESS)
228     {
229         if (thread->kernel_stack) free(thread->kernel_stack);
230         if (thread != NULL) dereference(&thread->header);
231
232         if (thread->tid != (dword_t)-1)
233         {
234             lock_acquire(&tid_bitmap_lock);
235             clear_bit(tid_alloc_bitmap, thread->tid);
236             lock_release(&tid_bitmap_lock);
237         }
238     }
239
240     return ret;
241 }
242
/* Return the thread currently executing on the CPU (maintained by scheduler()). */
thread_t *get_current_thread()
{
    return current_thread;
}
247
/*
 * Lazy FPU context switch — presumably called from the device-not-available
 * (#NM) fault handler; TODO confirm the caller. Saves the FPU state of the
 * previous owner (if any), loads the current thread's state, and clears
 * CR0.TS via CLTS so FPU instructions stop faulting.
 */
void thread_lazy_fpu(void)
{
    /* save the previous owner's hardware FPU state before clobbering it */
    if (last_fpu_thread) cpu_save_fpu_state(last_fpu_thread->state.fpu_state);
    cpu_restore_fpu_state(current_thread->state.fpu_state);
    last_fpu_thread = current_thread;
    /* clear CR0.TS so subsequent FPU instructions no longer trap */
    asm volatile ("clts");
}
255
/*
 * Scheduler tick handler. 'regs' is the trap frame of the interrupted
 * thread. Each call consumes one quantum tick; once the quantum reaches
 * zero the next ready thread is selected (lowest-index priority queue
 * first, FIFO within a queue) and its context is staged on its kernel
 * stack for the common interrupt-return path to consume.
 */
void scheduler(registers_t *regs)
{
    int i;
    critical_t critical;
    enter_critical(&critical);

    if (current_thread->quantum == 0)
    {
        /* re-queue the outgoing thread at the tail of its priority queue */
        list_append(&thread_queue[current_thread->priority], &current_thread->in_queue_list);
        thread_t *next_thread = NULL;

        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;

            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *thread = CONTAINER_OF(ptr, thread_t, in_queue_list);

                if (is_thread_ready(thread))
                {
                    next_thread = thread;
                    goto found;
                }
            }
        }

found:
        /* NOTE(review): there is no idle-thread fallback visible here; if
         * nothing is ready this assertion fires */
        ASSERT(next_thread != NULL);
        list_remove(&next_thread->in_queue_list);

        if (current_thread != next_thread)
        {
            /* snapshot the outgoing thread's registers from the trap frame */
            memcpy(&current_thread->state.regs, regs, sizeof(registers_t));

            current_thread->kernel_esp = regs->esp;
            /* the ring-3 stack pointer lives in the extended frame (esp3) */
            current_thread->state.regs.esp = ((registers_ext_t*)regs)->esp3;

            set_kernel_esp(next_thread->kernel_esp);

            /* sets bit 3 (0x08) of CR4 on every switch — TODO(review):
             * confirm intent; bit 3 is CR4.DE (debugging extensions) */
            asm volatile ("pushl %eax\n"
                          "movl %cr4, %eax\n"
                          "orb $0x08, %al\n"
                          "movl %eax, %cr4\n"
                          "popl %eax\n");

            /* stage the incoming thread's frame on its kernel stack; an
             * inter-privilege iret to ring 3 also needs SS:ESP on top */
            if (SEGMENT_RPL(next_thread->state.regs.cs) != 0)
            {
                push_to_stack(&next_thread->kernel_esp, get_user_data_selector());
                push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            }

            /* iret frame: EFLAGS, CS, EIP... */
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eflags);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.cs);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eip);
            /* ...then the slots the common exit path pops back off */
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.error_code);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eax);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ecx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.data_selector);

            /* the exit path resumes on this stack; the magic error code
             * marks the frame as a staged context switch */
            regs->esp = next_thread->kernel_esp;
            regs->error_code = CONTEXT_SWITCH_MAGIC;

            /* only reload CR3 when actually crossing address spaces */
            if (current_thread->owner_process != next_thread->owner_process)
            {
                set_page_directory(next_thread->owner_process->memory_space.page_directory);
            }
        }

        if (current_thread->owner_process != kernel_process)
        {
            bump_address_space(&current_thread->owner_process->memory_space);
        }

        /* a terminating thread dies once it is no longer inside the kernel */
        if (current_thread->terminating && !current_thread->in_kernel) current_thread->terminated = TRUE;
        if (current_thread->terminated) destroy_thread(current_thread);
        current_thread = next_thread;
        current_thread->quantum = QUANTUM;
    }
    else
    {
        current_thread->quantum--;
    }

    leave_critical(&critical);
}
348
/*
 * Block the calling thread until 'condition' becomes true, 'timeout'
 * milliseconds elapse (NO_TIMEOUT = wait forever), or the thread is asked
 * to terminate. timeout == 0 is a pure poll.
 *
 * NOTE: the wait record lives on this stack frame; is_thread_ready()
 * detaches it (thread->wait = NULL) before the thread is ever resumed, so
 * the frame is not referenced after this function returns.
 */
wait_result_t scheduler_wait(wait_condition_t *condition, dword_t timeout)
{
    if (test_condition(condition)) return WAIT_CONDITION_HIT;
    if (timeout == 0) return WAIT_TIMED_OUT;

    wait_t wait = { .root = condition, .timeout = timeout,  .timestamp = syscall_get_milliseconds(), .result = WAIT_CANCELED };
    /* publish the record; spin in the (unexpected) case one is still attached */
    while (!__sync_bool_compare_and_swap(&current_thread->wait, NULL, &wait)) continue;
    /* give up the CPU; we run again only after the wait is detached */
    syscall_yield_quantum();

    return wait.result;
}
360
/*
 * Suspend the calling thread for the given number of milliseconds.
 * NOTE(review): 'milliseconds' is a qword_t but scheduler_wait() takes a
 * dword_t timeout, so the value is silently truncated to 32 bits — a sleep
 * whose low 32 bits are zero returns immediately. Confirm intent.
 */
sysret_t syscall_sleep(qword_t milliseconds)
{
    wait_condition_t condition = { .type = WAIT_ALWAYS };
    return scheduler_wait(&condition, milliseconds) == WAIT_CANCELED ? ERR_CANCELED : ERR_SUCCESS;
}
366
367 sysret_t syscall_yield_quantum()
368 {
369     current_thread->quantum = 0;
370     reschedule();
371     return ERR_SUCCESS;
372 }
373
/*
 * Create a kernel-mode thread starting at 'routine' with 'param' placed on
 * its stack. stack_size == 0 selects KERNEL_STACK_SIZE. Returns via
 * create_thread_internal(); *new_thread receives the thread on success.
 */
dword_t create_system_thread(thread_procedure_t routine, dword_t flags, priority_t priority, dword_t stack_size, void *param, thread_t **new_thread)
{
    thread_state_t initial_state;
    memset(&initial_state, 0, sizeof(initial_state));

    if (!stack_size) stack_size = KERNEL_STACK_SIZE;

    /* extra bytes cover the 4-byte ESP alignment below */
    void *kernel_stack = malloc(stack_size + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL) return ERR_NOMEMORY;

    dword_t ret = commit_pages(kernel_stack, stack_size);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        return ret;
    }

    initial_state.regs.eip = (dword_t)routine;
    /* initial ESP = top of stack, rounded up to a 4-byte boundary */
    initial_state.regs.esp = ((dword_t)kernel_stack + stack_size + 3) & ~3;

    /* hand the routine its argument on the new stack.
     * NOTE(review): no return address is pushed beneath 'param', so under
     * plain cdecl the routine would find 'param' in the return-address
     * slot — confirm how the first dispatch actually builds the frame. */
    push_to_stack((uintptr_t*)&initial_state.regs.esp, (uintptr_t)param);

    return create_thread_internal(kernel_process, &initial_state, flags, priority, kernel_stack, new_thread);
}
398
399 sysret_t syscall_create_thread(handle_t process, thread_state_t *initial_state, dword_t flags, priority_t priority, handle_t *new_thread)
400 {
401     dword_t ret;
402     thread_state_t safe_state;
403     process_t *proc;
404     thread_t *thread;
405
406     if (get_previous_mode() == USER_MODE)
407     {
408         if (!check_usermode(initial_state, sizeof(initial_state))) return ERR_BADPTR;
409         if (!check_usermode(new_thread, sizeof(*new_thread))) return ERR_BADPTR;
410
411         EH_TRY safe_state = *initial_state;
412         EH_CATCH EH_ESCAPE(return ERR_BADPTR);
413         EH_DONE;
414     }
415     else
416     {
417         safe_state = *initial_state;
418     }
419
420     if (process != INVALID_HANDLE)
421     {
422         if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
423     }
424     else
425     {
426         proc = get_current_process();
427         reference(&proc->header);
428     }
429
430     if (get_previous_mode() == USER_MODE && proc == kernel_process)
431     {
432         ret = ERR_FORBIDDEN;
433         goto cleanup;
434     }
435
436     void *kernel_stack = malloc(KERNEL_STACK_SIZE + sizeof(uintptr_t) - 1);
437     if (kernel_stack == NULL)
438     {
439         ret = ERR_NOMEMORY;
440         goto cleanup;
441     }
442
443     ret = commit_pages(kernel_stack, KERNEL_STACK_SIZE);
444     if (ret != ERR_SUCCESS) goto cleanup;
445
446     ret = create_thread_internal(proc, &safe_state, flags, priority, kernel_stack, &thread);
447     if (ret != ERR_SUCCESS)
448     {
449         free(kernel_stack);
450         goto cleanup;
451     }
452
453     handle_t thread_handle;
454     ret = open_object(&thread->header, 0, &thread_handle);
455
456     EH_TRY *new_thread = thread_handle;
457     EH_CATCH syscall_close_object(thread_handle);
458     EH_DONE;
459
460 cleanup:
461     dereference(&proc->header);
462     return ret;
463 }
464
465 sysret_t syscall_open_thread(dword_t tid, handle_t *handle)
466 {
467     int i;
468     thread_t *thread = NULL;
469     dword_t ret = ERR_NOTFOUND;
470     critical_t critical;
471     enter_critical(&critical);
472
473     if (current_thread->tid == tid)
474     {
475         thread = current_thread;
476     }
477     else
478     {
479         for (i = 0; i < THREAD_PRIORITY_MAX; i++)
480         {
481             list_entry_t *ptr = thread_queue[i].next;
482
483             for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
484             {
485                 thread_t *entry = CONTAINER_OF(ptr, thread_t, in_queue_list);
486
487                 if (entry->tid == tid)
488                 {
489                     thread = entry;
490                     goto found;
491                 }
492             }
493         }
494     }
495
496 found:
497     if (thread != NULL) ret = open_object(&thread->header, 0, handle);
498     else ret = ERR_NOTFOUND;
499
500     leave_critical(&critical);
501     return ret;
502 }
503
504 dword_t terminate_thread_internal(thread_t *thread, dword_t exit_code)
505 {
506     thread->exit_code = exit_code;
507     thread->terminating = TRUE;
508     return ERR_SUCCESS;
509 }
510
511 sysret_t syscall_terminate_thread(handle_t handle, dword_t exit_code)
512 {
513     thread_t *thread;
514
515     if (handle == INVALID_HANDLE)
516     {
517         thread = get_current_thread();
518         reference(&thread->header);
519     }
520     else
521     {
522         if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
523     }
524
525     return terminate_thread_internal(thread, exit_code);
526 }
527
528 sysret_t syscall_query_thread(handle_t handle, thread_info_t info_type, void *buffer, size_t size)
529 {
530     dword_t ret = ERR_SUCCESS;
531     thread_t *thread;
532     void *safe_buffer;
533
534     if (get_previous_mode() == USER_MODE)
535     {
536         if (!check_usermode(buffer, size)) return ERR_BADPTR;
537
538         safe_buffer = malloc(size);
539         if (safe_buffer == NULL) return ERR_NOMEMORY;
540         memset(safe_buffer, 0, size);
541     }
542     else
543     {
544         safe_buffer = buffer;
545     }
546
547     if (handle == INVALID_HANDLE)
548     {
549         thread = get_current_thread();
550         reference(&thread->header);
551     }
552     else
553     {
554         if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
555     }
556
557     switch (info_type)
558     {
559         case THREAD_TID_INFO:
560             if (size >= sizeof(dword_t)) *((dword_t*)safe_buffer) = thread->tid;
561             else ret = ERR_SMALLBUF;
562             break;
563
564         case THREAD_FROZEN_INFO:
565             if (size >= sizeof(int32_t)) *((int32_t*)safe_buffer) = thread->frozen;
566             else ret = ERR_SMALLBUF;
567             break;
568
569         case THREAD_CPU_STATE_INFO:
570             if (size >= sizeof(thread_state_t))
571             {
572                 if (current_thread->tid != thread->tid)
573                 {
574                     *((thread_state_t*)safe_buffer) = thread->state;
575                 }
576                 else
577                 {
578                     ((thread_state_t*)safe_buffer)->regs = *thread->last_context;
579                     cpu_save_fpu_state(((thread_state_t*)safe_buffer)->fpu_state);
580                 }
581             }
582             else
583             {
584                 ret = ERR_SMALLBUF;
585             }
586
587             break;
588
589         case THREAD_PRIORITY_INFO:
590             if (size >= sizeof(priority_t)) *((priority_t*)safe_buffer) = thread->priority;
591             else ret = ERR_SMALLBUF;
592             break;
593
594         case THREAD_AFFINITY_INFO:
595             if (size >= sizeof(affinity_t)) *((affinity_t*)safe_buffer) = thread->affinity;
596             else ret = ERR_SMALLBUF;
597
598         default:
599             ret = ERR_INVALID;
600     }
601
602     if (get_previous_mode() == USER_MODE)
603     {
604         EH_TRY memcpy(buffer, safe_buffer, size);
605         EH_CATCH ret = ERR_BADPTR;
606         EH_DONE;
607
608         free(safe_buffer);
609     }
610
611     dereference(&thread->header);
612     return ret;
613 }
614
615 sysret_t syscall_set_thread(handle_t handle, thread_info_t info_type, const void *buffer, size_t size)
616 {
617     dword_t ret;
618     thread_t *thread;
619     void *safe_buffer;
620
621     if (get_previous_mode() == USER_MODE)
622     {
623         if (!check_usermode(buffer, size)) return ERR_BADPTR;
624
625         safe_buffer = malloc(size);
626         if (safe_buffer == NULL) return ERR_NOMEMORY;
627
628         EH_TRY memcpy(safe_buffer, buffer, size);
629         EH_CATCH ret = ERR_BADPTR;
630         EH_DONE;
631     }
632     else
633     {
634         safe_buffer = (void*)buffer;
635     }
636
637     if (handle == INVALID_HANDLE)
638     {
639         thread = get_current_thread();
640         reference(&thread->header);
641     }
642     else
643     {
644         if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
645         {
646             if (get_previous_mode() == USER_MODE) free(safe_buffer);
647             return ERR_INVALID;
648         }
649     }
650
651     switch (info_type)
652     {
653     case THREAD_CPU_STATE_INFO:
654         if (size >= sizeof(thread_state_t))
655         {
656             if (thread->owner_process->pid == kernel_process->pid) return ERR_FORBIDDEN;
657             thread_state_t *new_state = (thread_state_t*)safe_buffer;
658
659             critical_t critical;
660             if (current_thread->tid != thread->tid) enter_critical(&critical);
661
662             if (thread->in_kernel == 0)
663             {
664                 thread->state.regs.eax = new_state->regs.eax;
665                 thread->state.regs.ecx = new_state->regs.ecx;
666                 thread->state.regs.edx = new_state->regs.edx;
667                 thread->state.regs.ebx = new_state->regs.ebx;
668                 thread->state.regs.esp = new_state->regs.esp;
669                 thread->state.regs.ebp = new_state->regs.ebp;
670                 thread->state.regs.esi = new_state->regs.esi;
671                 thread->state.regs.edi = new_state->regs.edi;
672                 thread->state.regs.eip = new_state->regs.eip;
673                 thread->state.regs.eflags = (thread->state.regs.eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
674             }
675             else if (thread->last_context)
676             {
677                 thread->last_context->eax = new_state->regs.eax;
678                 thread->last_context->ecx = new_state->regs.ecx;
679                 thread->last_context->edx = new_state->regs.edx;
680                 thread->last_context->ebx = new_state->regs.ebx;
681                 thread->last_context->esp = new_state->regs.esp;
682                 thread->last_context->ebp = new_state->regs.ebp;
683                 thread->last_context->esi = new_state->regs.esi;
684                 thread->last_context->edi = new_state->regs.edi;
685                 thread->last_context->eip = new_state->regs.eip;
686                 thread->last_context->eflags = (thread->last_context->eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
687             }
688
689             if (current_thread->tid != thread->tid)
690             {
691                 memcpy(thread->state.fpu_state, new_state->fpu_state, sizeof(thread->state.fpu_state));
692             }
693             else
694             {
695                 cpu_restore_fpu_state(new_state->fpu_state);
696             }
697
698             if (current_thread->tid != thread->tid) leave_critical(&critical);
699         }
700         else
701         {
702             ret = ERR_SMALLBUF;
703         }
704
705         break;
706
707     case THREAD_PRIORITY_INFO:
708         if (size >= sizeof(priority_t)) thread->priority = *((priority_t*)safe_buffer);
709         else ret = ERR_SMALLBUF;
710
711     case THREAD_AFFINITY_INFO:
712         if (size >= sizeof(affinity_t)) thread->affinity = *((affinity_t*)safe_buffer);
713         else ret = ERR_SMALLBUF;
714         break;
715
716     default:
717         ret = ERR_INVALID;
718     }
719
720     if (get_previous_mode() == USER_MODE) free(safe_buffer);
721     dereference(&thread->header);
722     return ret;
723 }
724
725 sysret_t syscall_freeze_thread(handle_t handle)
726 {
727     dword_t ret = ERR_SUCCESS;
728     thread_t *thread;
729
730     if (handle == INVALID_HANDLE)
731     {
732         thread = get_current_thread();
733         reference(&thread->header);
734     }
735     else
736     {
737         if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
738     }
739
740     thread->frozen++;
741
742     dereference(&thread->header);
743     return ret;
744 }
745
746 sysret_t syscall_thaw_thread(handle_t handle)
747 {
748     dword_t ret = ERR_SUCCESS;
749     thread_t *thread;
750
751     if (handle == INVALID_HANDLE)
752     {
753         thread = get_current_thread();
754         reference(&thread->header);
755     }
756     else
757     {
758         if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
759     }
760
761     thread->frozen--;
762
763     dereference(&thread->header);
764     return ret;
765 }
766
767 void thread_init(void)
768 {
769     int i;
770     critical_t critical;
771
772     memset(tid_alloc_bitmap, 0, sizeof(tid_alloc_bitmap));
773     set_bit(tid_alloc_bitmap, 0);
774
775     thread_t *main_thread = (thread_t*)malloc(sizeof(thread_t));
776     if (main_thread == NULL) KERNEL_CRASH("Cannot allocate thread object");
777
778     init_object(&main_thread->header, NULL, OBJECT_THREAD);
779
780     if (create_object(&main_thread->header) != ERR_SUCCESS)
781     {
782         KERNEL_CRASH("Cannot initialize thread object");
783     }
784
785     main_thread->tid = 0;
786     main_thread->priority = THREAD_PRIORITY_MID;
787     main_thread->kernel_stack = malloc(KERNEL_STACK_SIZE);
788     ASSERT(main_thread->kernel_stack != NULL);
789     commit_pages(main_thread->kernel_stack, KERNEL_STACK_SIZE);
790     main_thread->kernel_esp = ((uintptr_t)main_thread->kernel_stack + KERNEL_STACK_SIZE) & ~3;
791     set_kernel_esp(main_thread->kernel_esp);
792     main_thread->exit_code = 0;
793     main_thread->quantum = 0;
794     main_thread->running_ticks = 0ULL;
795     main_thread->owner_process = kernel_process;
796     list_append(&kernel_process->threads, &main_thread->in_process_list);
797     main_thread->in_kernel = 1;
798     main_thread->last_context = NULL;
799     main_thread->terminated = FALSE;
800     main_thread->previous_mode = KERNEL_MODE;
801     main_thread->wait = NULL;
802
803     memset(&main_thread->kernel_handler, 0, sizeof(main_thread->kernel_handler));
804     memset(&main_thread->user_handler, 0, sizeof(main_thread->user_handler));
805
806     enter_critical(&critical);
807
808     current_thread = main_thread;
809     for (i = 0; i < THREAD_PRIORITY_MAX; i++) list_init(&thread_queue[i]);
810     scheduler_enabled = TRUE;
811
812     leave_critical(&critical);
813 }