// ...

#include "../memory/vmap.h"
#include "../memory/heap.h"
#include "../memory/paging.h"
#include "../smp/smp.h"

// for get_rflags()
#include "../lib/registers.h"

// for the yield interrupt handler
#include "../int/irq.h"

#define XSTR(s) STR(s)
#define STR(x) #x
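// two-level stringification: STR(YIELD_IRQ) alone would produce
// the literal string "YIELD_IRQ", while XSTR expands the macro
// argument first. Assuming e.g. #define YIELD_IRQ 47,
// XSTR(YIELD_IRQ) expands to "47", which is used below to
// assemble the "int $47" instruction string in sched_yield()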

/**
 * schedule algorithm:
 * ...
 */

static size_t n_threads = 0;

// ...

static process_t* processes;

/**
 * this lock protects the process table
 * and the ready queues
 */
fast_spinlock_t sched_lock = {0};

/**
 * when invoking a syscall,
 * syscall_stacks[lapic_id] should contain
 * ...
 */
void** syscall_stacks;

static void yield_irq_handler(struct driver* unused) {
    // nothing to do:
    // everything is done by the
    // common interrupt handler
    (void) unused;
}

void sched_init(void) {
    // init syscall stacks
    syscall_stacks = malloc(sizeof(void*) * get_smp_count());
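    // one slot per core: this assumes LAPIC ids densely index
    // the range [0, get_smp_count()), since syscall_stacks is
    // indexed by lapic_id (see its declaration above)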

    // init the virtual yield IRQ
    register_irq(YIELD_IRQ, yield_irq_handler, 0);
}

/**
 * @brief alloc a process in the processes list
 * and return a pointer to it.
 * The caller should have interrupts disabled.
 *
 * the scheduler lock is taken and not released:
 * the caller is responsible for releasing it
 * after inserting the process in the list
 *
 * @return process_t*
 */
static process_t* add_process(void) {
    // add it at the end
    spinlock_acquire(&sched_lock);

    unsigned id = n_processes++;
    processes = realloc(processes, sizeof(process_t) * n_processes);
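    // note: realloc may move the whole array, so process_t
    // pointers are only stable while sched_lock is held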

    return processes + id;
}

static void remove_process(process_t* process) {
    // first lock the sched lock
    spinlock_acquire(&sched_lock);

    // remove it from the list
    unsigned id = (process - processes);

    processes[id] = processes[--n_processes];
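    // O(1) unordered removal: the last entry is moved into
    // the freed slot, so table order is not preserved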

    // unlock the sched lock
    spinlock_release(&sched_lock);

    log_warn("process %d removed", id);
}

#define N_QUEUES 4

// alloc pids in a circular way
// ...

gp_regs_t* kernel_saved_rsp = NULL;

void __attribute__((noreturn)) _restore_context(struct gp_regs* rsp);
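// (assumed to be implemented in assembly: it reloads the saved
//  general-purpose registers from *rsp and iretqs into the saved
//  context, hence noreturn)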

/**
 * the process lock should be taken before
 * calling this function
 */
static
thread_t* get_thread_by_tid(process_t* process, tid_t tid) {
    // ...
}

process_t* sched_get_process(pid_t pid) {

    if(pid == KERNEL_PID)
        return NULL;

    // NULL: not found
    process_t* process = NULL;

    _cli();

    // lock the process table
    spinlock_acquire(&sched_lock);

    // sequential search
    for(unsigned i = 0; i < n_processes; i++) {
        if(processes[i].pid == pid)
            process = &processes[i];
    }

    if(process) {
        // lock the process to make sure it won't be freed
        spinlock_acquire(&process->lock);
    }

    // unlock the process table
    spinlock_release(&sched_lock);
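
    // lock handoff: when found, the process lock is taken *before*
    // the table lock is dropped, so the process cannot be freed
    // in between (lock order: sched_lock, then process->lock)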

    return process;
}

pid_t sched_create_process(pid_t ppid, const void* elffile, size_t elffile_sz) {
    // ...

    // NULL: kernel process
    process_t* parent = NULL;

    uint64_t rf = get_rflags();
    _cli();
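    // save RFLAGS then disable interrupts: the return paths use
    // set_rflags(rf) so the caller's interrupt-enable state is
    // restored exactly as it was, instead of an unconditional _sti()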

    if(ppid != KERNEL_PID) {
        parent = sched_get_process(ppid);

        if(!parent) {
            // no such ppid
            set_rflags(rf);
            return -1;
        }
    }

    int ret = create_process(&new, parent, elffile, elffile_sz);

    // unlock parent
    if(parent)
        spinlock_release(&parent->lock);

    if(!ret) {
        // error
        set_rflags(rf);
        return -1;
    }

    _cli();

    // add it to the processes list
    *add_process() = new;
    spinlock_release(&sched_lock);

    set_rflags(rf);

    return new.pid;
}

int sched_kill_process(pid_t pid, int status) {
    // the entire operation is critical
    _cli();

    // the process lock is acquired there
    process_t* process = sched_get_process(pid);

    if(!process) {
        // no such process
        _sti();
        return -1;
    }

    int running = 0;

    for(unsigned i = 0; i < process->n_threads; i++) {
        thread_t* thread = &process->threads[i];

        if(thread->state == RUNNING) {
            // we can't kill a running thread:
            // we must wait for it to finish,
            // so we do a lazy kill

            // @todo send an IPI to the thread's cpu
            // to force it to stop earlier
            thread->should_exit = 1;
            running = 1;
        }
        else if(thread->state == READY) {
            // remove it from the ready queue
            // and free the thread
            thread_terminate(thread, status);
        }
    }

    if(!running) {
        // no running thread: we can kill the process,
        // i.e. remove it from the list and free it

        // avoid deadlocks:
        // we must release the process lock
        // before taking the sched lock,
        // as another task could concurrently
        // take the sched lock and then
        // try to take the process lock
        spinlock_release(&process->lock);
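
        // lock-order cycle avoided here:
        //   this path (without the release above): process->lock -> sched_lock
        //   choose_next() / sched_get_process():   sched_lock -> process->lock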

        remove_process(process);
        free_process(process);

        // the process has been successfully removed
        // from the list, so no one can fetch it anymore
    }
    else {
        // we can't kill the process yet:
        // we must wait for its threads to finish
        spinlock_release(&process->lock);
    }

    _sti();

    // success
    return 0;
}

void sched_save(gp_regs_t* rsp) {
    // ...
    else {
        kernel_saved_rsp = NULL;

        // p's lock is taken there
        process_t* p = sched_get_process(current_pid);
        assert(p);

        thread_t* t = get_thread_by_tid(p, current_tid);
        assert(t);

        t->state = READY;
        t->rsp = rsp;

        // save page table
        p->saved_page_dir_paddr = get_user_page_map();
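        // (the user half of the address space is per process:
        //  schedule() reinstalls it with set_user_page_map()
        //  before restoring the thread context)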

        // release process lock
        spinlock_release(&p->lock);

        /*
        log_warn("SAVED cs=%x, ss=%x, rsp=%x, rip=%x",
                 t->rsp->cs, t->rsp->ss, t->rsp->rsp, t->rsp->rip);
        stacktrace_print();
        */
    }
}

void sched_yield(void) {
    // invoke a custom interrupt
    // that does nothing but
    // reschedule

    _cli();

    // takes p's lock
    process_t* p = sched_get_process(current_pid);
    assert(p); // no such process

    thread_t* t = get_thread_by_tid(p, current_tid);
    assert(t); // no such thread

    t->state = READY;

    // release process lock
    spinlock_release(&p->lock);

    asm volatile("int $" XSTR(YIELD_IRQ));
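    // assuming YIELD_IRQ expands to e.g. 47, this emits "int $47":
    // the software interrupt enters the common interrupt path,
    // which saves the context (sched_save) and ends up in
    // schedule(), never returning here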

    __builtin_unreachable();
}

static process_t* choose_next(void) {
    // lock the process table
    spinlock_acquire(&sched_lock);

    // sequential search
    for(int i = n_processes-1; i >= 0; i--) {
        process_t* process = &processes[i];

        if(process->n_threads > 0) {
            // lock the process to make sure it won't be freed
            spinlock_acquire(&process->lock);

            // unlock the process table
            spinlock_release(&sched_lock);

            return process;
        }
    }

    // unlock the process table
    spinlock_release(&sched_lock);

    // no process found
    return NULL;
}

void schedule(void) {
    if(currenly_in_irq && current_pid == KERNEL_PID) {
        // do not schedule if the interrupted task
        // was the kernel
        assert(kernel_saved_rsp);

        currenly_in_irq = 0;
        _restore_context(kernel_saved_rsp);
    }

    // disable interrupts
    _cli();

    // next process
    process_t* p;

    // next thread
    thread_t* t;

    // select one process
    while(1) {
        // lazy kill: check whether the chosen process
        // should still be scheduled

        // process is locked there
        p = choose_next();
        assert(p); // there must be at least one schedulable process

        t = &p->threads[0];

        assert(p->n_threads == 1);
        //assert(p->pid == 1);
        assert(t->tid == 1);

        if(t->should_exit && t->state == READY) {
            // if t->state != READY,
            // the process might already be executing
            // in kernel space, on the very core we are
            // running on. Thus, we can't kill it, as
            // our kernel stack resides in its memory.

            // lazy thread kill:
            // kill the thread and free it
            thread_terminate(t, t->exit_status);

            if(--p->n_threads == 0) {
                // no more threads, we can kill the process:
                // remove it from the list and free it
                free_process(p);
            }
            else {
                // we can't kill the process yet:
                // we must wait for its other threads to finish
                spinlock_release(&p->lock);
            }
        }
        else break;
    }

    // the process is selected for execution

    // map its user address space
    set_user_page_map(p->saved_page_dir_paddr);

    _cli();
    t->state = RUNNING;

    current_pid = p->pid;
    current_tid = t->tid;

    // unlock the process:
    // there is no risk of the process being freed,
    // because the current thread is marked RUNNING
    spinlock_release(&p->lock);

    syscall_stacks[get_lapic_id()] = (void*) t->kernel_stack.base + t->kernel_stack.size;
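    // the kernel stack grows downwards: base + size is its (empty)
    // top, which the syscall entry code on this core will switch to
    // (see the syscall_stacks doc comment above)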

    _restore_context(t->rsp);
}

void schedule_irq_handler(void) {
    schedule();
    panic("unreachable");
}


pid_t sched_current_pid(void) {
    return current_pid;
}


tid_t sched_current_tid(void) {
    return current_tid;
}


process_t* sched_current_process(void) {
    if(!current_pid) {
        // current process: kernel