Fix a stack leak. Prevent the kernel stack from overflowing.
[monolithium.git] kernel/src/thread.c
/*
 * thread.c
 *
 * Copyright (C) 2018 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <thread.h>
#include <timer.h>
#include <process.h>
#include <exception.h>
#include <syscalls.h>
#include <segments.h>
#include <heap.h>
#include <cpu.h>
#include <log.h>

extern void reschedule(void);

bool_t scheduler_enabled = FALSE;
static list_entry_t thread_queue[THREAD_PRIORITY_MAX];
static thread_t *current_thread = NULL;
static thread_t *last_fpu_thread = NULL;
static dword_t tid_alloc_bitmap[MAX_THREADS / 32];
static DECLARE_LOCK(tid_bitmap_lock);

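/* Allocate the lowest free thread ID from the bitmap, or (dword_t)-1 if all
 * MAX_THREADS IDs are in use. */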
static dword_t alloc_tid()
{
    int i;
    dword_t tid = (dword_t)-1;

    lock_acquire(&tid_bitmap_lock);

    for (i = 0; i < MAX_THREADS; i++)
    {
        if (!test_bit(tid_alloc_bitmap, i))
        {
            tid = i;
            set_bit(tid_alloc_bitmap, i);
            break;
        }
    }

    lock_release(&tid_bitmap_lock);
    return tid;
}

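/* Recursively evaluate a wait condition tree: group nodes combine their
 * children with any/all semantics, leaf nodes compare *pointer against
 * value, and WAIT_ALWAYS never becomes true (such a wait can only time out
 * or be canceled). */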
static inline bool_t test_condition(wait_condition_t *condition)
{
    wait_condition_t **ptr;

    switch (condition->type)
    {
    case WAIT_GROUP_ANY:
        for (ptr = condition->conditions; *ptr; ptr++) if (test_condition(*ptr)) return TRUE;
        return FALSE;
    case WAIT_GROUP_ALL:
        for (ptr = condition->conditions; *ptr; ptr++) if (!test_condition(*ptr)) return FALSE;
        return TRUE;
    case WAIT_ALWAYS:
        return FALSE;
    case WAIT_UNTIL_EQUAL:
        return *condition->pointer == condition->value;
    case WAIT_UNTIL_NOT_EQUAL:
        return *condition->pointer != condition->value;
    case WAIT_UNTIL_LESS:
        return *condition->pointer < condition->value;
    case WAIT_UNTIL_NOT_LESS:
        return *condition->pointer >= condition->value;
    case WAIT_UNTIL_GREATER:
        return *condition->pointer > condition->value;
    case WAIT_UNTIL_NOT_GREATER:
        return *condition->pointer <= condition->value;
    default:
        KERNEL_CRASH("Invalid wait condition value");
        return FALSE;
    }
}

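/* A thread may be scheduled if it is alive, not frozen in user mode, and
 * either has no pending wait or that wait has just been satisfied, timed
 * out, or canceled by termination. */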
static inline bool_t is_thread_ready(thread_t *thread)
{
    qword_t current_time = syscall_get_milliseconds();

    if (thread->terminated) return FALSE;
    if (thread->frozen > 0 && !thread->in_kernel) return FALSE;
    if (!thread->wait) return TRUE;

    if (test_condition(thread->wait->root))
    {
        thread->wait->result = WAIT_CONDITION_HIT;
        thread->wait = NULL;
        return TRUE;
    }

    if (thread->wait->timeout != NO_TIMEOUT && (current_time - thread->wait->timestamp) >= (qword_t)thread->wait->timeout)
    {
        thread->wait->result = WAIT_TIMED_OUT;
        thread->wait = NULL;
        return TRUE;
    }

    if (thread->terminating)
    {
        thread->wait->result = WAIT_CANCELED;
        thread->wait = NULL;
        return TRUE;
    }

    return FALSE;
}

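/* Unlink a dead thread from the scheduler and its process, release its
 * kernel stack (the leak fixed by this commit), and tear down the owning
 * process once its thread list becomes empty. */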
static void destroy_thread(thread_t *thread)
{
    list_remove(&thread->in_queue_list);

    lock_acquire(&thread->owner_process->thread_list_lock);
    list_remove(&thread->in_process_list);
    lock_release(&thread->owner_process->thread_list_lock);

    free(thread->kernel_stack);
    thread->kernel_stack = NULL;

    if (thread->owner_process->threads.next == &thread->owner_process->threads)
    {
        destroy_process(thread->owner_process);
    }

    dereference(&thread->header);
}

void thread_cleanup(object_t *obj)
{
    if (CONTAINER_OF(obj, thread_t, header) == last_fpu_thread) last_fpu_thread = NULL;
}

dword_t thread_pre_wait(object_t *obj, void *parameter, wait_condition_t *condition)
{
    thread_t *thread = (thread_t*)obj;
    condition->type = WAIT_UNTIL_NOT_EQUAL;
    condition->pointer = &thread->terminated;
    condition->value = FALSE;
    return ERR_SUCCESS;
}

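/* Common thread construction path. The caller provides an already committed
 * kernel_stack and retains ownership of it whenever this function fails. */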
dword_t create_thread_internal(process_t *proc, thread_state_t *initial_state, dword_t flags, priority_t priority, void *kernel_stack, thread_t **new_thread)
{
    dword_t ret;
    if (proc->terminating) return ERR_CANCELED;

    thread_t *thread = (thread_t*)malloc(sizeof(thread_t));
    if (thread == NULL) return ERR_NOMEMORY;

    init_object(&thread->header, NULL, OBJECT_THREAD);

    ret = create_object(&thread->header);
    if (ret != ERR_SUCCESS)
    {
        free(thread);
        return ret;
    }

    thread->tid = alloc_tid();
    if (thread->tid == (dword_t)-1)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    thread->priority = priority;
    thread->quantum = QUANTUM;
    thread->frozen = (flags & THREAD_CREATE_FROZEN) ? TRUE : FALSE;
    thread->running_ticks = 0ULL;
    thread->owner_process = proc;
    thread->exit_code = 0;
    thread->terminating = FALSE;
    thread->terminated = FALSE;
    thread->last_context = NULL;
    thread->wait = NULL;
    memset(&thread->kernel_handler, 0, sizeof(thread->kernel_handler));
    memset(&thread->user_handler, 0, sizeof(thread->user_handler));

    thread->state = *initial_state;
    thread->state.regs.eflags = 0x202;

    if (proc != kernel_process)
    {
        thread->previous_mode = USER_MODE;
        thread->in_kernel = 0;
        thread->state.regs.cs = get_user_code_selector();
        thread->state.regs.data_selector = get_user_data_selector();
    }
    else
    {
        thread->previous_mode = KERNEL_MODE;
        thread->in_kernel = 1; /* was left uninitialized for kernel threads */
        thread->state.regs.cs = get_kernel_code_selector();
        thread->state.regs.data_selector = get_kernel_data_selector();
    }

    thread->kernel_stack = kernel_stack;
    thread->kernel_esp = ((uintptr_t)thread->kernel_stack + KERNEL_STACK_SIZE + 3) & ~3;

    lock_acquire(&thread->owner_process->thread_list_lock);
    list_append(&proc->threads, &thread->in_process_list);
    lock_release(&thread->owner_process->thread_list_lock);

    critical_t critical;
    enter_critical(&critical);
    list_append(&thread_queue[priority], &thread->in_queue_list);
    leave_critical(&critical);

    *new_thread = thread;
    ret = ERR_SUCCESS;

cleanup:
    if (ret != ERR_SUCCESS)
    {
        /* The caller still owns kernel_stack, so it is not freed here. The TID
         * bit must be released before the final dereference, which may free
         * the thread object. */
        if (thread->tid != (dword_t)-1)
        {
            lock_acquire(&tid_bitmap_lock);
            clear_bit(tid_alloc_bitmap, thread->tid);
            lock_release(&tid_bitmap_lock);
        }

        dereference(&thread->header);
    }

    return ret;
}

thread_t *get_current_thread()
{
    return current_thread;
}

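/* Lazy FPU switching: save the FPU state of the thread that last used the
 * FPU, restore this thread's state, and execute clts to clear CR0.TS so
 * FPU instructions stop faulting. */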
void thread_lazy_fpu(void)
{
    if (last_fpu_thread) cpu_save_fpu_state(last_fpu_thread->state.fpu_state);
    cpu_restore_fpu_state(current_thread->state.fpu_state);
    last_fpu_thread = current_thread;
    asm volatile ("clts");
}

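/* Round-robin scheduler, called with the interrupted thread's register
 * frame. Each call consumes one quantum tick; when the quantum reaches
 * zero, the highest-priority ready thread is selected and an interrupt
 * return frame is built on its kernel stack for the context switch code
 * to pop. */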
void scheduler(registers_t *regs)
{
    int i;
    critical_t critical;
    enter_critical(&critical);

    if (current_thread->quantum == 0)
    {
        list_append(&thread_queue[current_thread->priority], &current_thread->in_queue_list);
        thread_t *next_thread = NULL;

        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;

            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *thread = CONTAINER_OF(ptr, thread_t, in_queue_list);

                if (is_thread_ready(thread))
                {
                    next_thread = thread;
                    goto found;
                }
            }
        }

found:
        ASSERT(next_thread != NULL);
        list_remove(&next_thread->in_queue_list);

        /* Catch kernel stack overflows: a thread's saved esp must never fall
         * below its stack base. */
        if (current_thread->tid != 0) ASSERT(current_thread->kernel_esp >= (uintptr_t)current_thread->kernel_stack);
        if (next_thread->tid != 0) ASSERT(next_thread->kernel_esp >= (uintptr_t)next_thread->kernel_stack);

        if (current_thread != next_thread)
        {
            memcpy(&current_thread->state.regs, regs, sizeof(registers_t));
            current_thread->kernel_esp = regs->esp;
            if (SEGMENT_RPL(regs->cs) != 0) current_thread->state.regs.esp = ((registers_ext_t*)regs)->esp3;

            set_kernel_esp(next_thread->kernel_esp);

            /*asm volatile ("pushl %eax\n"
                          "movl %cr4, %eax\n"
                          "orb $0x08, %al\n"
                          "movl %eax, %cr4\n"
                          "popl %eax\n");*/

            /* Rebuild an interrupt return frame on the next thread's kernel
             * stack; user-mode threads also get the esp3/ss slots. */
            if (SEGMENT_RPL(next_thread->state.regs.cs) != 0)
            {
                push_to_stack(&next_thread->kernel_esp, get_user_data_selector());
                push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            }

            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eflags);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.cs);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eip);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.error_code);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eax);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ecx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.data_selector);

            regs->esp = next_thread->kernel_esp;
            regs->error_code = CONTEXT_SWITCH_MAGIC;

            if (current_thread->owner_process != next_thread->owner_process)
            {
                set_page_directory(next_thread->owner_process->memory_space.page_directory);
            }
        }

        if (current_thread->owner_process != kernel_process)
        {
            bump_address_space(&current_thread->owner_process->memory_space);
        }

        if (current_thread->terminating && !current_thread->in_kernel) current_thread->terminated = TRUE;
        if (current_thread->terminated) destroy_thread(current_thread);
        current_thread = next_thread;
        current_thread->quantum = QUANTUM;
    }
    else
    {
        current_thread->quantum--;
    }

    leave_critical(&critical);
}

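/* Block the current thread until the condition holds, the timeout (in
 * milliseconds, or NO_TIMEOUT) elapses, or the thread is terminated.
 * Hypothetical usage, waiting up to 100 ms for a flag to become nonzero:
 *
 *     wait_condition_t cond = { .type = WAIT_UNTIL_NOT_EQUAL,
 *                               .pointer = &flag, .value = 0 };
 *     wait_result_t result = scheduler_wait(&cond, 100);
 */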
wait_result_t scheduler_wait(wait_condition_t *condition, dword_t timeout)
{
    if (test_condition(condition)) return WAIT_CONDITION_HIT;
    if (timeout == 0) return WAIT_TIMED_OUT;

    wait_t wait = { .root = condition, .timeout = timeout, .timestamp = syscall_get_milliseconds(), .result = WAIT_CANCELED };
    while (!__sync_bool_compare_and_swap(&current_thread->wait, NULL, &wait)) continue;
    syscall_yield_quantum();

    return wait.result;
}

sysret_t syscall_sleep(qword_t milliseconds)
{
    wait_condition_t condition = { .type = WAIT_ALWAYS };
    return scheduler_wait(&condition, milliseconds) == WAIT_CANCELED ? ERR_CANCELED : ERR_SUCCESS;
}

sysret_t syscall_yield_quantum()
{
    current_thread->quantum = 0;
    reschedule();
    return ERR_SUCCESS;
}

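/* Create a kernel-mode thread running routine(param). The extra
 * sizeof(uintptr_t) - 1 bytes of slack let the initial esp be aligned
 * without overrunning the allocation. Hypothetical usage:
 *
 *     thread_t *worker;
 *     create_system_thread(worker_main, 0, THREAD_PRIORITY_MID, 0, NULL, &worker);
 */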
dword_t create_system_thread(thread_procedure_t routine, dword_t flags, priority_t priority, dword_t stack_size, void *param, thread_t **new_thread)
{
    thread_state_t initial_state;
    memset(&initial_state, 0, sizeof(initial_state));

    if (!stack_size) stack_size = KERNEL_STACK_SIZE;

    void *kernel_stack = malloc(stack_size + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL) return ERR_NOMEMORY;

    dword_t ret = commit_pages(kernel_stack, stack_size);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        return ret;
    }

    initial_state.regs.eip = (dword_t)routine;
    initial_state.regs.esp = ((dword_t)kernel_stack + stack_size + 3) & ~3;

    push_to_stack((uintptr_t*)&initial_state.regs.esp, (uintptr_t)param);

    return create_thread_internal(kernel_process, &initial_state, flags, priority, kernel_stack, new_thread);
}

sysret_t syscall_create_thread(handle_t process, thread_state_t *initial_state, dword_t flags, priority_t priority, handle_t *new_thread)
{
    dword_t ret;
    thread_state_t safe_state;
    process_t *proc;
    thread_t *thread;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(initial_state, sizeof(*initial_state))) return ERR_BADPTR;
        if (!check_usermode(new_thread, sizeof(*new_thread))) return ERR_BADPTR;

        EH_TRY safe_state = *initial_state;
        EH_CATCH EH_ESCAPE(return ERR_BADPTR);
        EH_DONE;
    }
    else
    {
        safe_state = *initial_state;
    }

    if (process != INVALID_HANDLE)
    {
        if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
    }
    else
    {
        proc = get_current_process();
        reference(&proc->header);
    }

    if (get_previous_mode() == USER_MODE && proc == kernel_process)
    {
        ret = ERR_FORBIDDEN;
        goto cleanup;
    }

    void *kernel_stack = malloc(KERNEL_STACK_SIZE + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    ret = commit_pages(kernel_stack, KERNEL_STACK_SIZE);
    if (ret != ERR_SUCCESS)
    {
        /* Previously leaked the stack on this path. */
        free(kernel_stack);
        goto cleanup;
    }

    ret = create_thread_internal(proc, &safe_state, flags, priority, kernel_stack, &thread);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        goto cleanup;
    }

    handle_t thread_handle;
    ret = open_object(&thread->header, 0, &thread_handle);
    if (ret == ERR_SUCCESS)
    {
        EH_TRY *new_thread = thread_handle;
        EH_CATCH syscall_close_object(thread_handle);
        EH_DONE;
    }

cleanup:
    dereference(&proc->header);
    return ret;
}

sysret_t syscall_open_thread(dword_t tid, handle_t *handle)
{
    int i;
    thread_t *thread = NULL;
    dword_t ret = ERR_NOTFOUND;
    critical_t critical;
    enter_critical(&critical);

    if (current_thread->tid == tid)
    {
        thread = current_thread;
    }
    else
    {
        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;

            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *entry = CONTAINER_OF(ptr, thread_t, in_queue_list);

                if (entry->tid == tid)
                {
                    thread = entry;
                    goto found;
                }
            }
        }
    }

found:
    if (thread != NULL) ret = open_object(&thread->header, 0, handle);
    else ret = ERR_NOTFOUND;

    leave_critical(&critical);
    return ret;
}

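/* Mark a thread for termination. The scheduler finishes the teardown the
 * next time it sees the thread outside kernel mode. */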
dword_t terminate_thread_internal(thread_t *thread, dword_t exit_code)
{
    thread->exit_code = exit_code;
    thread->terminating = TRUE;
    return ERR_SUCCESS;
}

sysret_t syscall_terminate_thread(handle_t handle, dword_t exit_code)
{
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    /* Drop the reference taken above; returning without it leaked the object. */
    dword_t ret = terminate_thread_internal(thread, exit_code);
    dereference(&thread->header);
    return ret;
}

sysret_t syscall_query_thread(handle_t handle, thread_info_t info_type, void *buffer, size_t size)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;
    void *safe_buffer;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(buffer, size)) return ERR_BADPTR;

        safe_buffer = malloc(size);
        if (safe_buffer == NULL) return ERR_NOMEMORY;
        memset(safe_buffer, 0, size);
    }
    else
    {
        safe_buffer = buffer;
    }

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
        {
            if (get_previous_mode() == USER_MODE) free(safe_buffer);
            return ERR_INVALID;
        }
    }

    switch (info_type)
    {
        case THREAD_TID_INFO:
            if (size >= sizeof(dword_t)) *((dword_t*)safe_buffer) = thread->tid;
            else ret = ERR_SMALLBUF;
            break;

        case THREAD_FROZEN_INFO:
            if (size >= sizeof(int32_t)) *((int32_t*)safe_buffer) = thread->frozen;
            else ret = ERR_SMALLBUF;
            break;

        case THREAD_CPU_STATE_INFO:
            if (size >= sizeof(thread_state_t))
            {
                if (current_thread->tid != thread->tid)
                {
                    *((thread_state_t*)safe_buffer) = thread->state;
                }
                else
                {
                    ((thread_state_t*)safe_buffer)->regs = *thread->last_context;
                    cpu_save_fpu_state(((thread_state_t*)safe_buffer)->fpu_state);
                }
            }
            else
            {
                ret = ERR_SMALLBUF;
            }

            break;

        case THREAD_PRIORITY_INFO:
            if (size >= sizeof(priority_t)) *((priority_t*)safe_buffer) = thread->priority;
            else ret = ERR_SMALLBUF;
            break;

        case THREAD_AFFINITY_INFO:
            if (size >= sizeof(affinity_t)) *((affinity_t*)safe_buffer) = thread->affinity;
            else ret = ERR_SMALLBUF;
            break;

        default:
            ret = ERR_INVALID;
    }

    if (get_previous_mode() == USER_MODE)
    {
        EH_TRY memcpy(buffer, safe_buffer, size);
        EH_CATCH ret = ERR_BADPTR;
        EH_DONE;

        free(safe_buffer);
    }

    dereference(&thread->header);
    return ret;
}

sysret_t syscall_set_thread(handle_t handle, thread_info_t info_type, const void *buffer, size_t size)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;
    void *safe_buffer;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(buffer, size)) return ERR_BADPTR;

        safe_buffer = malloc(size);
        if (safe_buffer == NULL) return ERR_NOMEMORY;

        EH_TRY memcpy(safe_buffer, buffer, size);
        EH_CATCH
        {
            free(safe_buffer);
            EH_ESCAPE(return ERR_BADPTR);
        }
        EH_DONE;
    }
    else
    {
        safe_buffer = (void*)buffer;
    }

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
        {
            if (get_previous_mode() == USER_MODE) free(safe_buffer);
            return ERR_INVALID;
        }
    }

    switch (info_type)
    {
    case THREAD_CPU_STATE_INFO:
        if (size >= sizeof(thread_state_t))
        {
            if (thread->owner_process->pid == kernel_process->pid)
            {
                /* Returning directly here leaked safe_buffer and the reference. */
                ret = ERR_FORBIDDEN;
                break;
            }

            thread_state_t *new_state = (thread_state_t*)safe_buffer;

            critical_t critical;
            if (current_thread->tid != thread->tid) enter_critical(&critical);

            if (thread->in_kernel == 0)
            {
                thread->state.regs.eax = new_state->regs.eax;
                thread->state.regs.ecx = new_state->regs.ecx;
                thread->state.regs.edx = new_state->regs.edx;
                thread->state.regs.ebx = new_state->regs.ebx;
                thread->state.regs.esp = new_state->regs.esp;
                thread->state.regs.ebp = new_state->regs.ebp;
                thread->state.regs.esi = new_state->regs.esi;
                thread->state.regs.edi = new_state->regs.edi;
                thread->state.regs.eip = new_state->regs.eip;
                thread->state.regs.eflags = (thread->state.regs.eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
            }
            else if (thread->last_context)
            {
                thread->last_context->eax = new_state->regs.eax;
                thread->last_context->ecx = new_state->regs.ecx;
                thread->last_context->edx = new_state->regs.edx;
                thread->last_context->ebx = new_state->regs.ebx;
                thread->last_context->esp = new_state->regs.esp;
                thread->last_context->ebp = new_state->regs.ebp;
                thread->last_context->esi = new_state->regs.esi;
                thread->last_context->edi = new_state->regs.edi;
                thread->last_context->eip = new_state->regs.eip;
                thread->last_context->eflags = (thread->last_context->eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
            }

            if (current_thread->tid != thread->tid)
            {
                memcpy(thread->state.fpu_state, new_state->fpu_state, sizeof(thread->state.fpu_state));
            }
            else
            {
                cpu_restore_fpu_state(new_state->fpu_state);
            }

            if (current_thread->tid != thread->tid) leave_critical(&critical);
        }
        else
        {
            ret = ERR_SMALLBUF;
        }

        break;

    case THREAD_PRIORITY_INFO:
        if (size >= sizeof(priority_t)) thread->priority = *((priority_t*)safe_buffer);
        else ret = ERR_SMALLBUF;
        break;

    case THREAD_AFFINITY_INFO:
        if (size >= sizeof(affinity_t)) thread->affinity = *((affinity_t*)safe_buffer);
        else ret = ERR_SMALLBUF;
        break;

    default:
        ret = ERR_INVALID;
    }

    if (get_previous_mode() == USER_MODE) free(safe_buffer);
    dereference(&thread->header);
    return ret;
}

sysret_t syscall_freeze_thread(handle_t handle)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    thread->frozen++;

    dereference(&thread->header);
    return ret;
}

sysret_t syscall_thaw_thread(handle_t handle)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    thread->frozen--;

    dereference(&thread->header);
    return ret;
}

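/* Bootstrap threading: wrap the currently executing startup context in a
 * thread object (TID 0) owned by the kernel process, so the scheduler has
 * a valid current_thread to switch away from. */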
void thread_init(void)
{
    int i;
    critical_t critical;

    memset(tid_alloc_bitmap, 0, sizeof(tid_alloc_bitmap));
    set_bit(tid_alloc_bitmap, 0);

    thread_t *main_thread = (thread_t*)malloc(sizeof(thread_t));
    if (main_thread == NULL) KERNEL_CRASH("Cannot allocate thread object");

    init_object(&main_thread->header, NULL, OBJECT_THREAD);

    if (create_object(&main_thread->header) != ERR_SUCCESS)
    {
        KERNEL_CRASH("Cannot initialize thread object");
    }

    main_thread->tid = 0;
    main_thread->priority = THREAD_PRIORITY_MID;
    main_thread->kernel_stack = malloc(KERNEL_STACK_SIZE);
    ASSERT(main_thread->kernel_stack != NULL);
    commit_pages(main_thread->kernel_stack, KERNEL_STACK_SIZE);
    main_thread->kernel_esp = ((uintptr_t)main_thread->kernel_stack + KERNEL_STACK_SIZE) & ~3;
    set_kernel_esp(main_thread->kernel_esp);
    main_thread->exit_code = 0;
    main_thread->quantum = 0;
    main_thread->running_ticks = 0ULL;
    main_thread->owner_process = kernel_process;
    list_append(&kernel_process->threads, &main_thread->in_process_list);
    main_thread->in_kernel = 1;
    main_thread->last_context = NULL;
    main_thread->frozen = FALSE;        /* was left uninitialized */
    main_thread->terminating = FALSE;   /* was left uninitialized */
    main_thread->terminated = FALSE;
    main_thread->previous_mode = KERNEL_MODE;
    main_thread->wait = NULL;

    memset(&main_thread->kernel_handler, 0, sizeof(main_thread->kernel_handler));
    memset(&main_thread->user_handler, 0, sizeof(main_thread->user_handler));

    enter_critical(&critical);

    current_thread = main_thread;
    for (i = 0; i < THREAD_PRIORITY_MAX; i++) list_init(&thread_queue[i]);
    scheduler_enabled = TRUE;

    leave_critical(&critical);
}