diff --git a/kernel/boot/kmain.c b/kernel/boot/kmain.c
index 433b438..8c23c98 100644
--- a/kernel/boot/kmain.c
+++ b/kernel/boot/kmain.c
@@ -3,6 +3,16 @@
 #include
 #include
 #include
+#include <scheduler.h>
+
+void thread_function()
+{
+    while(1)
+    {
+        debug("%d", get_current_thread()->tid);
+        schedule();
+    }
+}
 
 int kmain(uint64_t multiboot_magic, void *multiboot_data)
 {
@@ -16,8 +26,17 @@ int kmain(uint64_t multiboot_magic, void *multiboot_data)
     vmm_init();
     pmm_init();
     gdt_init();
+    scheduler_init();
+    scheduler_insert(new_thread(thread_function));
+    scheduler_insert(new_thread(thread_function));
+    scheduler_insert(new_thread(thread_function));
+    scheduler_insert(new_thread(thread_function));
+    scheduler_insert(new_thread(thread_function));
 
+    debug_info("BOOT COMPLETE\n");
 
+    schedule();
 
+    debug_error("PANIC - This line should be unreachable (%s:%d)\n", __FILE__, __LINE__);
     for(;;)asm("hlt");
 }
diff --git a/kernel/include/list.h b/kernel/include/list.h
new file mode 100644
index 0000000..2cfd9a1
--- /dev/null
+++ b/kernel/include/list.h
@@ -0,0 +1,69 @@
+#pragma once
+
+#define LIST(type, name) \
+    struct { \
+        type *prev; \
+        type *next; \
+    } name;
+
+#define LIST_HEAD_INIT(name) \
+    name.prev = name.next = 0;
+
+#define LIST_INIT(self, list) \
+    self->list.prev = self->list.next = 0;
+
+#define LIST_APPEND(head, self, list) \
+    do { \
+        self->list.prev = head.prev; \
+        self->list.next = 0; \
+        if(self->list.prev) { \
+            self->list.prev->list.next = self; \
+        } \
+        head.prev = self; \
+        if(!head.next){head.next = head.prev;} \
+    } while(0);
+
+#define LIST_REMOVE(head, self, list) \
+    do { \
+        if(head.next == self) { \
+            head.next = self->list.next; \
+        } \
+        if(head.prev == self) { \
+            head.prev = self->list.prev; \
+        } \
+        if(self->list.next) { \
+            self->list.next->list.prev = self->list.prev; \
+        } \
+        if(self->list.prev) { \
+            self->list.prev->list.next = self->list.next; \
+        } \
+        self->list.prev = self->list.next = 0; \
+    } while(0);
+
+#define LIST_EMPTY(head) ((head.next) == 0)
+#define LIST_FIRST(head) (head.next)
+
+#define LIST_FOREACH(head, type, name, list) \
+    for(type *name = head.next; name; name = name->list.next)
+
+#define LIST_INSERT_BEFORE(head, self, list, after) \
+    do { \
+        self->list.next = after; \
+        self->list.prev = after->list.prev; \
+        after->list.prev = self; \
+        if(self->list.prev) \
+            self->list.prev->list.next = self; \
+        if(head.next == after) \
+            head.next = self; \
+    } while(0);
+
+#define LIST_INSERT_AFTER(head, self, list, before) \
+    do { \
+        self->list.prev = before; \
+        self->list.next = before->list.next; \
+        before->list.next = self; \
+        if(self->list.next) \
+            self->list.next->list.prev = self; \
+        if(head.prev == before) \
+            head.prev = self; \
+    } while(0);
diff --git a/kernel/include/scheduler.h b/kernel/include/scheduler.h
new file mode 100644
index 0000000..ca1e8d5
--- /dev/null
+++ b/kernel/include/scheduler.h
@@ -0,0 +1,6 @@
+#pragma once
+#include <thread.h>
+
+void schedule();
+void scheduler_insert(thread_t *th);
+void scheduler_init();
diff --git a/kernel/include/thread.h b/kernel/include/thread.h
new file mode 100644
index 0000000..b74f34b
--- /dev/null
+++ b/kernel/include/thread.h
@@ -0,0 +1,43 @@
+#pragma once
+#include <stdint.h>
+#include <list.h>
+
+#define THREAD_STACK_SIZE (0x1000-sizeof(thread_t))
+
+typedef struct thread_st
+{
+    uint64_t stack_pointer; // Top of the kernel stack for thread
+    uint64_t tid;
+    uint64_t state;
+    LIST(struct thread_st, ready_queue);
+} thread_t;
+
+#define THREAD_STATE_WAITING  0x1
+#define THREAD_STATE_READY    0x2
+#define THREAD_STATE_RUNNING  0x3
+#define THREAD_STATE_FINISHED 0x4
+
+typedef struct thread_stack_st
+{
+    uint8_t stack[THREAD_STACK_SIZE-8*8];
+    // Stack layout of a new thread
+    uint64_t RBP;
+    uint64_t RBX;
+    uint64_t R12;
+    uint64_t R13;
+    uint64_t R14;
+    uint64_t R15;
+    uint64_t zero_frame;
+    uint64_t function_address;
+    thread_t tcb;
+} thread_stack_t;
+
+extern thread_t *current_thread;
+#define get_current_thread() (current_thread)
+#define set_current_thread(new) (current_thread = (new))
+
+thread_t *new_thread(void (*func)(void));
+void switch_thread(thread_t *old, thread_t *new);
+void free_thread(thread_t *th);
+
+void swtch(uint64_t *old, uint64_t *new);
diff --git a/kernel/interrupts/interrupts.c b/kernel/interrupts/interrupts.c
index 46bbe36..d7e9b0d 100644
--- a/kernel/interrupts/interrupts.c
+++ b/kernel/interrupts/interrupts.c
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include <thread.h>
 
 struct int_gate_descriptor idt[NUM_INTERRUPTS];
 struct idtr idtr;
@@ -60,6 +61,8 @@ registers_t *int_handler(registers_t *r)
     print_registers(r);
 
 #ifndef NDEBUG
+    thread_t *th = get_current_thread();
+    (void)th;
     asm("int_handler_breakpoint:");
 #endif
 
diff --git a/kernel/mem/vmm.c b/kernel/mem/vmm.c
index 0fd839d..3912872 100644
--- a/kernel/mem/vmm.c
+++ b/kernel/mem/vmm.c
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include <thread.h>
 
 #define STRIP_FLAGS(addr) ((void *)(((uintptr_t)(addr)) & ~PAGE_FLAGS_MASK))
 
@@ -142,6 +143,8 @@ registers_t *page_fault_handler(registers_t *r)
     print_registers(r);
 
 #ifndef NDEBUG
+    thread_t *th = get_current_thread();
+    (void)th;
     asm("page_fault_breakpoint:");
 #endif
     for(;;);
diff --git a/kernel/proc/scheduler.c b/kernel/proc/scheduler.c
new file mode 100644
index 0000000..e4af472
--- /dev/null
+++ b/kernel/proc/scheduler.c
@@ -0,0 +1,70 @@
+#include <scheduler.h>
+#include <thread.h>
+#include <list.h>
+
+LIST(thread_t, ready_queue);
+
+thread_t *scheduler_th;
+thread_t *last_thread = 0;
+#define get_last_thread() (last_thread)
+#define set_last_thread(new) (last_thread = (new))
+
+void scheduler_insert(thread_t *th)
+{
+    // Append thread to the ready queue and prepare it for running
+    LIST_APPEND(ready_queue, th, ready_queue);
+    th->state = THREAD_STATE_READY;
+}
+
+void scheduler_remove(thread_t *th)
+{
+    // Remove thread from the ready queue
+    LIST_REMOVE(ready_queue, th, ready_queue);
+}
+
+thread_t *scheduler_next()
+{
+    // Get the next thread from the ready queue
+    if(!LIST_EMPTY(ready_queue))
+    {
+        thread_t *th = LIST_FIRST(ready_queue);
+        scheduler_remove(th);
+        return th;
+    }
+    return 0;
+}
+
+void scheduler()
+{
+    while(1)
+    {
+        thread_t *old = 0, *new = 0;
+        if((old = get_last_thread()))
+        {
+            if(old->state == THREAD_STATE_RUNNING)
+            {
+                old->state = THREAD_STATE_READY;
+                scheduler_insert(old);
+            }
+        }
+        while(!(new = scheduler_next()));
+        new->state = THREAD_STATE_RUNNING;
+        set_last_thread(new);
+        switch_thread(scheduler_th, new);
+    }
+}
+
+void schedule()
+{
+    // This function handles switching to the next thread in the ready queue
+
+    thread_t *old = get_current_thread();
+
+    switch_thread(old, scheduler_th);
+}
+
+void scheduler_init()
+{
+    LIST_HEAD_INIT(ready_queue);
+    scheduler_th = new_thread(scheduler);
+}
diff --git a/kernel/proc/swtch.S b/kernel/proc/swtch.S
new file mode 100644
index 0000000..84d5226
--- /dev/null
+++ b/kernel/proc/swtch.S
@@ -0,0 +1,28 @@
+.intel_syntax noprefix
+
+.global swtch
+
+swtch:
+# void swtch(uint64_t *old, uint64_t *new)
+# Switches stacks preserving callee preserved registers according to System V ABI
+    push rbp
+    mov rbp, rsp
+    push r15
+    push r14
+    push r13
+    push r12
+    push rbx
+    push rbp
+    cmp rdi, 0x0
+    jz .switch_in
+    mov [rdi], rsp
+.switch_in:
+    mov rsp, [rsi]
+    pop rbp
+    pop rbx
+    pop r12
+    pop r13
+    pop r14
+    pop r15
+    leaveq
+    ret
diff --git a/kernel/proc/thread.c b/kernel/proc/thread.c
new file mode 100644
index 0000000..5f598db
--- /dev/null
+++ b/kernel/proc/thread.c
@@ -0,0 +1,47 @@
+#include
+#include
+#include
+#include
+#include
+
+thread_t *current_thread = 0;
+
+uint64_t tid = 1;
+
+
+thread_t *new_thread(void (*func)(void))
+{
+    // Set up original stack of thread
+    thread_stack_t *stack = kcalloc(1, sizeof(thread_stack_t));
+    thread_t *th = &stack->tcb;
+
+    stack->function_address = (uint64_t)func;
+    stack->RBP = (uint64_t)&stack->zero_frame;
+
+    th->tid = tid++;
+    th->state = THREAD_STATE_READY;
+    th->stack_pointer = (uint64_t)&stack->RBP;
+
+    LIST_INIT(th, ready_queue);
+
+    return th;
+}
+
+void switch_thread(thread_t *old, thread_t *new)
+{
+    set_current_thread(new);
+
+    uint64_t *old_stack = (old)?&old->stack_pointer:0;
+    swtch(old_stack, &new->stack_pointer);
+}
+
+void free_thread(thread_t *th)
+{
+    if(th->state == THREAD_STATE_RUNNING || th->state == THREAD_STATE_READY)
+    {
+        debug_error("Trying to free a live thread!\n");
+        for(;;);
+    }
+    thread_stack_t *stack = incptr(th, -offsetof(thread_stack_t, tcb));
+    kfree(stack);
+}
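
Usage note (illustrative, not part of the patch): the intrusive list added in kernel/include/list.h keeps the first element in head.next and the last element in head.prev, and every element embeds its own prev/next pair under the field name passed as the "list" argument, which is what lets thread_t sit directly on the ready queue with no separate node allocation. The sketch below shows the intended calling pattern compiled on a host; item_t, its value field, the head name items, and the use of stdio are assumptions made up for this example, and it assumes list.h is reachable on the include path.

// Host-side sketch of how the list.h macros are meant to be used.
// item_t and its "value" field are hypothetical, not kernel code.
#include <stdio.h>
#include "list.h"                   // the header added by this patch

typedef struct item_st
{
    int value;
    LIST(struct item_st, queue);    // embedded prev/next links named "queue"
} item_t;

LIST(item_t, items);                // list head: items.next = first, items.prev = last

int main(void)
{
    LIST_HEAD_INIT(items);

    item_t nodes[2] = { { .value = 1 }, { .value = 2 } };
    item_t *a = &nodes[0], *b = &nodes[1];
    LIST_INIT(a, queue);
    LIST_INIT(b, queue);

    // Append keeps insertion order: a becomes the head, b the tail
    LIST_APPEND(items, a, queue);
    LIST_APPEND(items, b, queue);

    LIST_FOREACH(items, item_t, it, queue)
        printf("%d\n", it->value);                          // prints 1 then 2

    LIST_REMOVE(items, a, queue);
    printf("first is now %d\n", LIST_FIRST(items)->value);  // prints 2
    return 0;
}

Because the head caches the tail in head.prev, LIST_APPEND stays O(1), which is what scheduler_insert relies on when it requeues the running thread on every pass through scheduler().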