/*
 * thread.c
 *
 * Copyright (C) 2016 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <thread.h>
#include <timer.h>
#include <process.h>
#include <exception.h>
#include <syscalls.h>
#include <segments.h>
#include <heap.h>
#include <cpu.h>

extern void reschedule(void);

bool_t scheduler_enabled = FALSE;
static list_entry_t thread_queue[THREAD_PRIORITY_MAX];
static thread_t *current_thread = NULL;
static thread_t *last_fpu_thread = NULL;
static dword_t tid_alloc_bitmap[MAX_THREADS / 32];
static lock_t tid_bitmap_lock = 0;

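/* Allocate the lowest free thread ID from the global bitmap, taken under its
 * spinlock. Returns (dword_t)-1 when all MAX_THREADS IDs are in use. */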
static dword_t alloc_tid()
{
    int i;
    dword_t tid = (dword_t)-1;

    acquire_lock(&tid_bitmap_lock);

    for (i = 0; i < MAX_THREADS; i++)
    {
        if (!test_bit(tid_alloc_bitmap, i))
        {
            tid = i;
            set_bit(tid_alloc_bitmap, i);
            break;
        }
    }

    release_lock(&tid_bitmap_lock);
    return tid;
}

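/* Evaluate a wait condition against the watched pointer. WAIT_NEVER is
 * trivially satisfied (the thread never sleeps on it), while WAIT_ALWAYS is
 * never satisfied by the condition itself, so such a wait can only end
 * through a timeout or I/O cancelation. */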
static inline bool_t test_condition(wait_condition_t condition, dword_t *pointer, dword_t value)
{
    bool_t satisfied;

    switch (condition)
    {
    case WAIT_NEVER:
        satisfied = TRUE;
        break;

    case WAIT_ALWAYS:
        satisfied = FALSE;
        break;

    case WAIT_UNTIL_EQUAL:
        satisfied = (*pointer == value);
        break;

    case WAIT_UNTIL_NOT_EQUAL:
        satisfied = (*pointer != value);
        break;

    case WAIT_UNTIL_LESS:
        satisfied = (*pointer < value);
        break;

    case WAIT_UNTIL_NOT_LESS:
        satisfied = (*pointer >= value);
        break;

    case WAIT_UNTIL_GREATER:
        satisfied = (*pointer > value);
        break;

    case WAIT_UNTIL_NOT_GREATER:
        satisfied = (*pointer <= value);
        break;

    default:
        KERNEL_CRASH("Invalid wait condition value");
        break;
    }

    return satisfied;
}

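/* A thread may run if it is alive, not frozen (a frozen thread keeps running
 * while it is inside a syscall, i.e. its syscall lock is held), and no longer
 * waiting: a wait ends when its condition is met, its timeout elapses, or its
 * I/O is canceled, and the reason is recorded in wait_result as a side
 * effect. */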
static inline bool_t is_thread_ready(thread_t *thread)
{
    qword_t current_time = syscall_get_milliseconds();

    if (thread->terminated) return FALSE;
    if (thread->frozen > 0 && !thread->syscall_lock) return FALSE;

    if (test_condition(thread->wait_condition, thread->wait_pointer, thread->wait_value))
    {
        thread->wait_condition = WAIT_NEVER;
        thread->wait_result = WAIT_CONDITION_HIT;
        return TRUE;
    }

    if (thread->wait_timeout != 0ULL && (current_time - thread->wait_timestamp) >= (qword_t)thread->wait_timeout)
    {
        thread->wait_condition = WAIT_NEVER;
        thread->wait_result = WAIT_TIMED_OUT;
        return TRUE;
    }

    if (thread->cancel_io)
    {
        thread->wait_condition = WAIT_NEVER;
        thread->wait_result = WAIT_CANCELED;
        return TRUE;
    }

    return FALSE;
}

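/* Unlink a dead thread from the ready queue and its owner process, free its
 * kernel stack, and drop the scheduler's reference. Destroying the last
 * thread of a process tears down the process too. Both callers run inside a
 * critical section, which protects the queue manipulation. */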
static void destroy_thread(thread_t *thread)
{
    list_remove(&thread->in_queue_list);

    acquire_resource_exclusive(&thread->owner_process->thread_list_res);
    list_remove(&thread->in_process_list);
    release_resource(&thread->owner_process->thread_list_res);

    free(thread->kernel_stack);
    thread->kernel_stack = NULL;

    if (thread->owner_process->threads.next == &thread->owner_process->threads)
    {
        destroy_process(thread->owner_process);
    }

    dereference(&thread->header);
}

void thread_cleanup(object_t *obj)
{
    if (CONTAINER_OF(obj, thread_t, header) == last_fpu_thread) last_fpu_thread = NULL;
}

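/* Common constructor for kernel and user threads. The caller supplies the
 * kernel stack and keeps ownership of it on failure. EFLAGS is forced to
 * 0x202 (IF plus the always-one bit 1), and the code/data selectors are
 * picked according to the target ring. */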
dword_t create_thread_internal(process_t *proc, thread_state_t *initial_state, dword_t flags, priority_t priority, void *kernel_stack, thread_t **new_thread)
{
    dword_t ret;
    if (proc->terminating) return ERR_CANCELED;

    thread_t *thread = (thread_t*)malloc(sizeof(thread_t));
    if (thread == NULL) return ERR_NOMEMORY;

    init_object(&thread->header, NULL, OBJECT_THREAD);

    ret = create_object(&thread->header);
    if (ret != ERR_SUCCESS)
    {
        free(thread);
        return ret;
    }

    thread->tid = alloc_tid();
    if (thread->tid == (dword_t)-1)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    thread->priority = priority;
    thread->quantum = QUANTUM;
    thread->frozen = (flags & THREAD_CREATE_FROZEN) ? TRUE : FALSE;
    thread->running_ticks = 0ULL;
    thread->owner_process = proc;
    thread->exit_code = 0;
    thread->terminated = FALSE;
    thread->cancel_io = FALSE;
    thread->syscall_lock = 0;
    thread->wait_condition = WAIT_NEVER;
    thread->wait_timestamp = 0ULL;
    thread->wait_timeout = 0;
    thread->wait_pointer = NULL;
    thread->wait_value = 0;
    memset(&thread->kernel_handler, 0, sizeof(thread->kernel_handler));
    memset(&thread->user_handler, 0, sizeof(thread->user_handler));

    thread->state = *initial_state;
    thread->state.regs.eflags = 0x202;

    if (proc != kernel_process)
    {
        thread->previous_mode = USER_MODE;

        thread->state.regs.cs = get_user_code_selector();
        thread->state.regs.data_selector = get_user_data_selector();
    }
    else
    {
        thread->previous_mode = KERNEL_MODE;

        thread->state.regs.cs = get_kernel_code_selector();
        thread->state.regs.data_selector = get_kernel_data_selector();
    }

    thread->kernel_stack = kernel_stack;
    thread->kernel_esp = ((uintptr_t)thread->kernel_stack + KERNEL_STACK_SIZE + 3) & ~3;

    acquire_resource_exclusive(&thread->owner_process->thread_list_res);
    list_append(&proc->threads, &thread->in_process_list);
    release_resource(&thread->owner_process->thread_list_res);

    critical_t critical;
    enter_critical(&critical);
    list_append(&thread_queue[priority], &thread->in_queue_list);
    leave_critical(&critical);

    *new_thread = thread;
    ret = ERR_SUCCESS;

cleanup:
    if (ret != ERR_SUCCESS)
    {
        /* Release the TID before dropping the reference, and leave the
         * kernel stack alone: the caller owns it and frees it on failure. */
        if (thread->tid != (dword_t)-1)
        {
            acquire_lock(&tid_bitmap_lock);
            clear_bit(tid_alloc_bitmap, thread->tid);
            release_lock(&tid_bitmap_lock);
        }

        dereference(&thread->header);
    }

    return ret;
}

thread_t *get_current_thread()
{
    return current_thread;
}

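/* Lazy FPU switching: the scheduler sets CR0.TS on every context switch, so
 * the first FPU instruction of the incoming thread faults (#NM) and the
 * exception handler is expected to call this function, which saves the old
 * owner's FPU state, restores ours, and clears TS again with clts. */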
void thread_lazy_fpu(void)
{
    if (last_fpu_thread) fpu_save(last_fpu_thread->state.fpu_state);
    fpu_restore(current_thread->state.fpu_state);
    last_fpu_thread = current_thread;
    asm volatile ("clts");
}

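/* Timer-driven round-robin scheduler. Once the running thread exhausts its
 * quantum, it is appended to the tail of its priority queue and the queues
 * are scanned from index 0 upward (lower index wins) for the first ready
 * thread. On an actual switch, the outgoing register frame is saved into the
 * thread object and an interrupt-return frame for the incoming thread is
 * rebuilt on its kernel stack; the interrupt exit path is presumably keyed
 * by CONTEXT_SWITCH_MAGIC in the error code slot to pop it. */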
void scheduler(registers_t *regs)
{
    int i;
    critical_t critical;
    enter_critical(&critical);

    if (current_thread->quantum == 0)
    {
        list_append(&thread_queue[current_thread->priority], &current_thread->in_queue_list);
        thread_t *next_thread = NULL;

        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;

            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *thread = CONTAINER_OF(ptr, thread_t, in_queue_list);

                if (is_thread_ready(thread))
                {
                    next_thread = thread;
                    goto found;
                }
            }
        }

found:
        ASSERT(next_thread != NULL);
        list_remove(&next_thread->in_queue_list);

        if (current_thread != next_thread)
        {
            memcpy(&current_thread->state.regs, regs, sizeof(registers_t));

            current_thread->kernel_esp = regs->esp;
            current_thread->state.regs.esp = ((registers_ext_t*)regs)->esp3;

            set_kernel_esp(next_thread->kernel_esp);

            /* Set the task-switched flag (CR0.TS, bit 3) so that the next
             * FPU instruction raises #NM and thread_lazy_fpu() can switch
             * the FPU state on demand; clts there clears the flag again. */
            asm volatile ("pushl %eax\n"
                          "movl %cr0, %eax\n"
                          "orb $0x08, %al\n"
                          "movl %eax, %cr0\n"
                          "popl %eax\n");

            if (SEGMENT_RPL(next_thread->state.regs.cs) != 0)
            {
                push_to_stack(&next_thread->kernel_esp, get_user_data_selector());
                push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            }

            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eflags);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.cs);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eip);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.error_code);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eax);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ecx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.data_selector);

            regs->esp = next_thread->kernel_esp;
            regs->error_code = CONTEXT_SWITCH_MAGIC;

            if (current_thread->owner_process != next_thread->owner_process)
            {
                set_page_directory(next_thread->owner_process->memory_space.page_directory);
            }
        }

        if (current_thread->owner_process != kernel_process)
        {
            bump_address_space(&current_thread->owner_process->memory_space);
        }

        if (current_thread->terminated) destroy_thread(current_thread);
        current_thread = next_thread;
        current_thread->quantum = QUANTUM;
    }
    else
    {
        current_thread->quantum--;
    }

    leave_critical(&critical);
}

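/* Put the calling thread to sleep until the condition becomes true, the
 * timeout (in milliseconds) expires, or its I/O is canceled. The scheduler
 * re-tests the condition in is_thread_ready() on every pass, so a wakeup
 * that races with going to sleep is still noticed. NO_TIMEOUT sleeps
 * indefinitely, while a timeout of zero degenerates to a single poll. */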
wait_result_t scheduler_wait(wait_condition_t condition, dword_t timeout, uintptr_t *pointer, uintptr_t value)
{
    if (test_condition(condition, pointer, value)) return WAIT_CONDITION_HIT;
    if (timeout == 0) return WAIT_TIMED_OUT;

    critical_t critical;
    enter_critical(&critical);

    if (timeout != NO_TIMEOUT)
    {
        current_thread->wait_timestamp = syscall_get_milliseconds();
        current_thread->wait_timeout = timeout;
    }
    else
    {
        current_thread->wait_timestamp = 0ULL;
        current_thread->wait_timeout = 0;
    }

    current_thread->wait_pointer = pointer;
    current_thread->wait_value = value;
    current_thread->wait_condition = condition;

    leave_critical(&critical);
    syscall_yield_quantum();

    return current_thread->wait_result;
}

sysret_t syscall_sleep(qword_t milliseconds)
{
    scheduler_wait(WAIT_ALWAYS, milliseconds, NULL, 0);
    return ERR_SUCCESS;
}

sysret_t syscall_yield_quantum()
{
    current_thread->quantum = 0;
    reschedule();
    return ERR_SUCCESS;
}

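/* Spawn a kernel-mode thread starting at routine. The stack is slightly
 * over-allocated so the initial ESP can be aligned down to a 4-byte
 * boundary, and param is pushed onto the initial stack image where the
 * thread procedure expects to find it. */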
dword_t create_system_thread(thread_procedure_t routine, dword_t flags, priority_t priority, dword_t stack_size, void *param, thread_t **new_thread)
{
    thread_state_t initial_state;
    memset(&initial_state, 0, sizeof(initial_state));

    if (!stack_size) stack_size = KERNEL_STACK_SIZE;

    void *kernel_stack = malloc(stack_size + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL) return ERR_NOMEMORY;

    dword_t ret = commit_pages(kernel_stack, stack_size);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        return ret;
    }

    initial_state.regs.eip = (dword_t)routine;
    initial_state.regs.esp = ((dword_t)kernel_stack + stack_size + 3) & ~3;

    push_to_stack((uintptr_t*)&initial_state.regs.esp, (uintptr_t)param);

    /* The stack is owned by the caller until the thread object adopts it. */
    ret = create_thread_internal(kernel_process, &initial_state, flags, priority, kernel_stack, new_thread);
    if (ret != ERR_SUCCESS) free(kernel_stack);
    return ret;
}

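/* Syscall wrapper around create_thread_internal(). A user-mode caller has
 * its initial state copied in under exception protection and may not create
 * threads inside the kernel process. On success, a handle to the new thread
 * is written back to the caller. */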
sysret_t syscall_create_thread(handle_t process, thread_state_t *initial_state, dword_t flags, priority_t priority, handle_t *new_thread)
{
    dword_t ret;
    thread_state_t safe_state;
    process_t *proc;
    thread_t *thread;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(initial_state, sizeof(*initial_state))) return ERR_BADPTR;
        if (!check_usermode(new_thread, sizeof(*new_thread))) return ERR_BADPTR;

        EH_TRY safe_state = *initial_state;
        EH_CATCH EH_ESCAPE(return ERR_BADPTR);
        EH_DONE;
    }
    else
    {
        safe_state = *initial_state;
    }

    if (process != INVALID_HANDLE)
    {
        if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
    }
    else
    {
        proc = get_current_process();
        reference(&proc->header);
    }

    if (get_previous_mode() == USER_MODE && proc == kernel_process)
    {
        ret = ERR_FORBIDDEN;
        goto cleanup;
    }

    void *kernel_stack = malloc(KERNEL_STACK_SIZE + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    ret = commit_pages(kernel_stack, KERNEL_STACK_SIZE);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        goto cleanup;
    }

    ret = create_thread_internal(proc, &safe_state, flags, priority, kernel_stack, &thread);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        goto cleanup;
    }

    handle_t thread_handle;
    ret = open_object(&thread->header, 0, &thread_handle);
    if (ret != ERR_SUCCESS) goto cleanup;

    EH_TRY *new_thread = thread_handle;
    EH_CATCH ret = ERR_BADPTR;
    EH_DONE;

    if (ret != ERR_SUCCESS) syscall_close_object(thread_handle);

cleanup:
    dereference(&proc->header);
    return ret;
}

sysret_t syscall_open_thread(dword_t tid, handle_t *handle)
{
    int i;
    thread_t *thread = NULL;
    dword_t ret = ERR_NOTFOUND;
    critical_t critical;
    enter_critical(&critical);

    if (current_thread->tid == tid)
    {
        thread = current_thread;
    }
    else
    {
        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;

            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *entry = CONTAINER_OF(ptr, thread_t, in_queue_list);

                if (entry->tid == tid)
                {
                    thread = entry;
                    goto found;
                }
            }
        }
    }

found:
    if (thread != NULL) ret = open_object(&thread->header, 0, handle);
    else ret = ERR_NOTFOUND;

    leave_critical(&critical);
    return ret;
}

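/* Mark a thread as terminated. Another thread has its syscall lock taken
 * first, so it cannot be torn down in the middle of a syscall, and is then
 * destroyed immediately; terminating the current thread only sets the flag
 * and yields, leaving the destruction to the scheduler. */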
dword_t terminate_thread_internal(thread_t *thread, dword_t exit_code)
{
    critical_t critical;
    thread->cancel_io = TRUE;

    if (thread != current_thread) acquire_lock(&thread->syscall_lock);
    enter_critical(&critical);

    thread->exit_code = exit_code;
    thread->terminated = TRUE;
    if (thread != current_thread) destroy_thread(thread);

    leave_critical(&critical);

    if (thread == current_thread) syscall_yield_quantum();
    return ERR_SUCCESS;
}

sysret_t syscall_terminate_thread(handle_t handle, dword_t exit_code)
{
    dword_t ret;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    ret = terminate_thread_internal(thread, exit_code);

    /* Balance the reference taken above. When a thread terminates itself,
     * terminate_thread_internal() never returns and the scheduler destroys
     * the thread instead. */
    dereference(&thread->header);
    return ret;
}

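/* Read one attribute of a thread into the caller's buffer. For user-mode
 * callers the value is staged in a kernel buffer and copied out under
 * exception protection. Querying the CPU state of the calling thread itself
 * reads the live syscall register frame rather than the saved context. */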
sysret_t syscall_query_thread(handle_t handle, thread_info_t info_type, void *buffer, size_t size)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;
    void *safe_buffer;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(buffer, size)) return ERR_BADPTR;

        safe_buffer = malloc(size);
        if (safe_buffer == NULL) return ERR_NOMEMORY;
        memset(safe_buffer, 0, size);
    }
    else
    {
        safe_buffer = buffer;
    }

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
        {
            if (get_previous_mode() == USER_MODE) free(safe_buffer);
            return ERR_INVALID;
        }
    }

    switch (info_type)
    {
        case THREAD_TID_INFO:
            if (size >= sizeof(dword_t)) *((dword_t*)safe_buffer) = thread->tid;
            else ret = ERR_SMALLBUF;
            break;

        case THREAD_FROZEN_INFO:
            if (size >= sizeof(int32_t)) *((int32_t*)safe_buffer) = thread->frozen;
            else ret = ERR_SMALLBUF;
            break;

        case THREAD_CPU_STATE_INFO:
            if (size >= sizeof(thread_state_t))
            {
                if (current_thread->tid != thread->tid)
                {
                    *((thread_state_t*)safe_buffer) = thread->state;
                }
                else
                {
                    ((thread_state_t*)safe_buffer)->regs = *thread->syscall_regs;
                    fpu_save(((thread_state_t*)safe_buffer)->fpu_state);
                }
            }
            else
            {
                ret = ERR_SMALLBUF;
            }

            break;

        case THREAD_PRIORITY_INFO:
            if (size >= sizeof(priority_t)) *((priority_t*)safe_buffer) = thread->priority;
            else ret = ERR_SMALLBUF;
            break;

        case THREAD_AFFINITY_INFO:
            if (size >= sizeof(affinity_t)) *((affinity_t*)safe_buffer) = thread->affinity;
            else ret = ERR_SMALLBUF;
            break;

        default:
            ret = ERR_INVALID;
    }

    if (get_previous_mode() == USER_MODE)
    {
        EH_TRY memcpy(buffer, safe_buffer, size);
        EH_CATCH ret = ERR_BADPTR;
        EH_DONE;

        free(safe_buffer);
    }

    dereference(&thread->header);
    return ret;
}

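/* Write one attribute of a thread. CPU state updates only apply the
 * general-purpose registers, EIP, and the EFLAGS bits allowed by
 * SAFE_EFLAGS_MASK; if the target is blocked inside a syscall, the values
 * go to its saved syscall frame instead of its scheduler context. */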
sysret_t syscall_set_thread(handle_t handle, thread_info_t info_type, const void *buffer, size_t size)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;
    void *safe_buffer;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(buffer, size)) return ERR_BADPTR;

        safe_buffer = malloc(size);
        if (safe_buffer == NULL) return ERR_NOMEMORY;

        EH_TRY memcpy(safe_buffer, buffer, size);
        EH_CATCH ret = ERR_BADPTR;
        EH_DONE;

        /* Bail out instead of applying a partially copied state. */
        if (ret != ERR_SUCCESS)
        {
            free(safe_buffer);
            return ret;
        }
    }
    else
    {
        safe_buffer = (void*)buffer;
    }

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
        {
            if (get_previous_mode() == USER_MODE) free(safe_buffer);
            return ERR_INVALID;
        }
    }

    switch (info_type)
    {
    case THREAD_CPU_STATE_INFO:
        if (size >= sizeof(thread_state_t))
        {
            if (thread->owner_process->pid == kernel_process->pid)
            {
                /* Refuse to retarget kernel threads, and break to the
                 * common cleanup instead of leaking safe_buffer. */
                ret = ERR_FORBIDDEN;
                break;
            }

            thread_state_t *new_state = (thread_state_t*)safe_buffer;

            critical_t critical;
            if (current_thread->tid != thread->tid) enter_critical(&critical);

            if (thread->syscall_lock == 0)
            {
                thread->state.regs.eax = new_state->regs.eax;
                thread->state.regs.ecx = new_state->regs.ecx;
                thread->state.regs.edx = new_state->regs.edx;
                thread->state.regs.ebx = new_state->regs.ebx;
                thread->state.regs.esp = new_state->regs.esp;
                thread->state.regs.ebp = new_state->regs.ebp;
                thread->state.regs.esi = new_state->regs.esi;
                thread->state.regs.edi = new_state->regs.edi;
                thread->state.regs.eip = new_state->regs.eip;
                thread->state.regs.eflags = (thread->state.regs.eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
            }
            else if (thread->syscall_regs != NULL)
            {
                thread->syscall_regs->eax = new_state->regs.eax;
                thread->syscall_regs->ecx = new_state->regs.ecx;
                thread->syscall_regs->edx = new_state->regs.edx;
                thread->syscall_regs->ebx = new_state->regs.ebx;
                thread->syscall_regs->esp = new_state->regs.esp;
                thread->syscall_regs->ebp = new_state->regs.ebp;
                thread->syscall_regs->esi = new_state->regs.esi;
                thread->syscall_regs->edi = new_state->regs.edi;
                thread->syscall_regs->eip = new_state->regs.eip;
                thread->syscall_regs->eflags = (thread->syscall_regs->eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
            }

            if (current_thread->tid != thread->tid)
            {
                memcpy(thread->state.fpu_state, new_state->fpu_state, sizeof(thread->state.fpu_state));
            }
            else
            {
                fpu_restore(new_state->fpu_state);
            }

            if (current_thread->tid != thread->tid) leave_critical(&critical);
        }
        else
        {
            ret = ERR_SMALLBUF;
        }

        break;

    case THREAD_PRIORITY_INFO:
        if (size >= sizeof(priority_t)) thread->priority = *((priority_t*)safe_buffer);
        else ret = ERR_SMALLBUF;
        break;

    case THREAD_AFFINITY_INFO:
        if (size >= sizeof(affinity_t)) thread->affinity = *((affinity_t*)safe_buffer);
        else ret = ERR_SMALLBUF;
        break;

    default:
        ret = ERR_INVALID;
    }

    if (get_previous_mode() == USER_MODE) free(safe_buffer);
    dereference(&thread->header);
    return ret;
}

sysret_t syscall_wait_thread(handle_t handle, dword_t timeout)
{
    dword_t ret;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    /* Sleep until the thread's terminated flag becomes nonzero. */
    ret = scheduler_wait(WAIT_UNTIL_NOT_EQUAL, timeout, (uintptr_t*)&thread->terminated, FALSE);

    dereference(&thread->header);
    return ret;
}

sysret_t syscall_freeze_thread(handle_t handle)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    thread->frozen++;

    dereference(&thread->header);
    return ret;
}

sysret_t syscall_thaw_thread(handle_t handle)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    thread->frozen--;

    dereference(&thread->header);
    return ret;
}

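/* Bootstrap threading: wrap the code that is currently executing into a
 * thread object (TID 0) on a fresh kernel stack, attach it to the kernel
 * process, initialize the ready queues, and enable the scheduler. */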
void thread_init(void)
{
    int i;
    critical_t critical;

    memset(tid_alloc_bitmap, 0, sizeof(tid_alloc_bitmap));
    set_bit(tid_alloc_bitmap, 0);

    thread_t *main_thread = (thread_t*)malloc(sizeof(thread_t));
    if (main_thread == NULL) KERNEL_CRASH("Cannot allocate thread object");

    init_object(&main_thread->header, NULL, OBJECT_THREAD);

    if (create_object(&main_thread->header) != ERR_SUCCESS)
    {
        KERNEL_CRASH("Cannot initialize thread object");
    }

    main_thread->tid = 0;
    main_thread->priority = THREAD_PRIORITY_MID;
    main_thread->kernel_stack = malloc(KERNEL_STACK_SIZE);
    ASSERT(main_thread->kernel_stack != NULL);
    commit_pages(main_thread->kernel_stack, KERNEL_STACK_SIZE);
    main_thread->kernel_esp = ((uintptr_t)main_thread->kernel_stack + KERNEL_STACK_SIZE) & ~3;
    set_kernel_esp(main_thread->kernel_esp);
    main_thread->exit_code = 0;
    main_thread->quantum = 0;
    main_thread->running_ticks = 0ULL;
    main_thread->owner_process = kernel_process;
    list_append(&kernel_process->threads, &main_thread->in_process_list);
    main_thread->syscall_lock = 0;
    main_thread->terminated = FALSE;
    main_thread->frozen = 0;
    main_thread->cancel_io = FALSE;
    main_thread->previous_mode = KERNEL_MODE;
    main_thread->wait_condition = WAIT_NEVER;
    main_thread->wait_timestamp = 0ULL;
    main_thread->wait_timeout = 0;
    main_thread->wait_pointer = NULL;
    main_thread->wait_value = 0;

    memset(&main_thread->kernel_handler, 0, sizeof(main_thread->kernel_handler));
    memset(&main_thread->user_handler, 0, sizeof(main_thread->user_handler));

    enter_critical(&critical);

    current_thread = main_thread;
    for (i = 0; i < THREAD_PRIORITY_MAX; i++) list_init(&thread_queue[i]);
    scheduler_enabled = TRUE;

    leave_critical(&critical);
}