Browse Source

finished memory manager(?)

master
Mathieu Serandour 1 year ago
parent
commit
8173d026ea
  1. 97
      kernel/memory/paging.c
  2. 5
      kernel/memory/paging.h
  3. 5
      kernel/memory/physical_allocator.c
  4. 3
      kernel/memory/vmap.h

97
kernel/memory/paging.c

@ -22,6 +22,14 @@ extern void set_cr4(uint64_t cr4);
#define CR4_PCIDE (1lu << 17)
// size of bulks of allocation
// the page buffer is 64 long
// so we suppose that 4096 pages
// won't make more than 64 page
// tables
#define MAX_ALLOC 1024
/**
* 4th level table (pde) entry
*/
@ -164,6 +172,7 @@ static void map_physical_memory(const struct stivale2_struct_tag_memmap* memmap)
if(size == 0)
continue;
uint64_t virtual_addr = translate_address(phys_addr);
@ -174,7 +183,6 @@ static void map_physical_memory(const struct stivale2_struct_tag_memmap* memmap)
}
}
}
/*
static void map_allocator_data(void) {
@ -253,11 +261,11 @@ static int present_entry(void* entry) {
return (uint64_t)entry & PRESENT_ENTRY;
}
/*
function for debug purposes
//function for debug purposes
static void print_struct(int level, void** table, uint64_t virt) {
uint64_t* addr = table;
uint64_t* addr = translate_address(table);
//if(level > 1)
// return ;
@ -269,7 +277,7 @@ static void print_struct(int level, void** table, uint64_t virt) {
for(int i = 0; i < level; i++)
kputs("-");
if(level == 3) {
if(level == 2) {
kprintf(" %lx -> %lx\n", v << 12, extract_pointer(addr[i]));
}
else {
@ -279,7 +287,7 @@ static void print_struct(int level, void** table, uint64_t virt) {
}
}
}
*/
@ -291,23 +299,23 @@ void init_paging(const struct stivale2_struct_tag_memmap* memmap) {
// so memory is both identity mapped and translated
// so *x = *translate_address(x)
pml4[0] = create_table_entry(
(void*)early_virtual_to_physical(pdpt_low),
PRESENT_ENTRY
);
pml4[256] = create_table_entry(
(void*)early_virtual_to_physical(pdpt_mid),
PRESENT_ENTRY | PL_US
);
//pml4[0] = create_table_entry(
// (void*)early_virtual_to_physical(pdpt_low),
// PRESENT_ENTRY
// );
//pml4[256] = create_table_entry(
// (void*)early_virtual_to_physical(pdpt_mid),
// PRESENT_ENTRY | PL_US
// );
// the high half memory is supervisor only
// so that no user can access it even though the entry
// stays in the pml4!
pml4[511] = create_table_entry(
(void*)early_virtual_to_physical(pdpt_high),
PRESENT_ENTRY | PL_US
);
//pml4[511] = create_table_entry(
// (void*)early_virtual_to_physical(pdpt_high),
// PRESENT_ENTRY | PL_US
// );
// map all the memory to 0xffff800000000000
@ -320,14 +328,13 @@ void init_paging(const struct stivale2_struct_tag_memmap* memmap) {
// in order to avoid awful recursion bugs
alloc_page_table_realloc = 0;
//print_struct(0, pml4, 0);
// map the kernel
map_kernel(memmap);
}
void append_initialization(void) {
void append_paging_initialization(void) {
// enable PAE in cr4
// disable PCIDE
@ -428,8 +435,6 @@ void map_pages(uint64_t physical_addr,
uint64_t virtual_addr,
size_t count,
unsigned flags) {
while(count > 0) {
// fetch table indexes
@ -444,15 +449,23 @@ void map_pages(uint64_t physical_addr,
pdpte restrict pdptentry = extract_pointer(get_entry_or_allocate((void**)pml4entry, pdpti));
pde restrict pdentry = extract_pointer(get_entry_or_allocate((void**)pdptentry, pdi));
while(count > 0 && pti < 512) {
// create a new entry
uint64_t e = create_table_entry((void*)physical_addr,flags);
void** entry_ptr = (void**)translate_address(pdentry) + pti;
assert(!present_entry(*entry_ptr));
if(present_entry(*entry_ptr)) {
char buff[256];
sprintf(buff,
"map_pages(...,flags=%u):\n"
" tried to map physical memory 0x%lx to 0x%lx, but physical memory 0x%lx"
" was already mapped here",
flags, physical_addr, virtual_addr, extract_pointer(*entry_ptr));
panic(buff);
}
*entry_ptr = e;
@ -464,3 +477,37 @@ void map_pages(uint64_t physical_addr,
}
}
}
void alloc_pages(uint64_t virtual_addr_begin,
size_t count,
unsigned flags) {
// Allocate `count` physical pages (presumably via physalloc — confirm
// against physical_allocator.c) and map them starting at
// `virtual_addr_begin`, in chunks of at most MAX_ALLOC pages, using the
// given page-table entry `flags`.
// don't allow recursion
alloc_page_table_realloc = 0;
// Per-run callback handed to physalloc: maps each contiguous physical
// run it yields at the corresponding virtual address.
// NOTE(review): this is the GCC nested-function extension (captures
// `flags` from the enclosing scope) — non-standard C.
void callback(
uint64_t physical_address,
uint64_t virtual_address,
size_t c) {
map_pages(physical_address,
virtual_address,
c,
flags);
};
while(count > 0) {
// clamp this iteration's chunk to MAX_ALLOC pages
unsigned size = count;
if(size > MAX_ALLOC)
size = MAX_ALLOC;
// Pre-fill the page-table allocator's buffer so mapping this chunk
// does not allocate mid-operation; 16 is assumed sufficient for one
// MAX_ALLOC-sized chunk — TODO confirm against the MAX_ALLOC comment
// ("the page buffer is 64 long").
fill_page_table_allocator_buffer(16);
physalloc(size, virtual_addr_begin, callback);
count -= size;
virtual_addr_begin += size * 0x1000;
}
}

5
kernel/memory/paging.h

@ -13,7 +13,7 @@ struct stivale2_struct_tag_memmap;
* map the kernel executable to high half: 0xffffffff80000000 + phys
*/
void init_paging(const struct stivale2_struct_tag_memmap* memmap);
void append_initialization(void);
void append_paging_initialization(void);
// page table flags
@ -46,3 +46,6 @@ void map_pages(uint64_t physical_addr,
size_t count,
unsigned flags);
void alloc_pages(uint64_t virtual_addr,
size_t count,
unsigned flags);

5
kernel/memory/physical_allocator.c

@ -260,7 +260,7 @@ static void* select_bitmap(struct MR_header* header,
// return a pointer to the MR header
static struct MR_header* get_header_base(const struct memory_range* range) {
return range->base;
return (void *)((uint64_t)range->base | TRANSLATED_PHYSICAL_MEMORY_BEGIN);
}
// modifies the bitmaps to allocate
@ -401,6 +401,7 @@ void physalloc(size_t size, void* virtual_addr, PHYSALLOC_CALLBACK callback) {
while(size > 0) {
unsigned max_block_size_id = 0;
struct memory_range* range = getMR(size, &max_block_size_id);
const unsigned memory_range_length = range->length;
@ -483,7 +484,7 @@ void physalloc(size_t size, void* virtual_addr, PHYSALLOC_CALLBACK callback) {
// loop through the region inside the lvl0 map
for(unsigned j = 0; j < granularity; j++) {
void* target_address = range->base + (curr_page+1) * 0x1000;
callback((uint64_t)target_address, (uint64_t)virtual_addr, 0x1000);
callback((uint64_t)target_address, (uint64_t)virtual_addr, 1);
alloc_page_bitmaps(header, curr_page);

3
kernel/memory/vmap.h

@ -42,7 +42,8 @@
*
*/
#define APIC_VIRTUAL_ADDRESS 0xffffffff20000000llu
#define HPET_VIRTUAL_ADDRESS 0xffffffff1fffe000llu
#define APIC_VIRTUAL_ADDRESS 0xffffffff1ffff000llu

Loading…
Cancel
Save