
physalloc done

master
Mathieu Serandour 1 year ago
parent commit 2cdaaeb8cd
  1. 12 kernel/memory/paging.c
  2. 146 kernel/memory/physical_allocator.c

12 kernel/memory/paging.c

@@ -72,17 +72,16 @@ enum stivale2_mmap_type : uint32_t {
void physical_allocator_callback(uint64_t physical_address,
uint64_t virtual_address,
size_t size) {
//kprintf("%lx -> %lx\r", virtual_address, physical_address);
static int i;
//kprintf("allocated %d pages\n", ++i);
if(i++ < 10)
kprintf("%lx -> %lx\n", virtual_address, physical_address);
}
void init_paging(void) {
physalloc(370864, 0x0000000000, physical_allocator_callback);
kprintf("---------------------------------------------------\n");
//physalloc(17, 0xfffff800000000, physical_allocator_callback);
physalloc(20, 0xfffff800000000, physical_allocator_callback);
while(1);
// get the physical address of the pml4 table
uint64_t lower_half_ptr = ~0xffffffff80000000llu | (uint64_t)&pml4_table;
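The allocation API is callback driven: physalloc() reports every page it hands out through the callback, paired with the virtual address it should be mapped at. A minimal consumer might look like the sketch below; the PHYSALLOC_CALLBACK typedef is an assumption reconstructed from the call sites in this diff, not the header's exact text, and kprintf comes from the kernel's own klib.

#include <stdint.h>
#include <stddef.h>

typedef void (*PHYSALLOC_CALLBACK)(uint64_t physical_address,
                                   uint64_t virtual_address,
                                   size_t size);

static void map_page_callback(uint64_t physical_address,
                              uint64_t virtual_address,
                              size_t size) {
    // a real consumer would install a page-table entry here;
    // this sketch only traces the mapping
    kprintf("%lx -> %lx (%lu bytes)\n",
            virtual_address, physical_address, size);
}

// usage, mirroring the test call above:
// physalloc(20, (void*)0xfffff800000000, map_page_callback);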
@@ -93,7 +92,6 @@ void init_paging(void) {
// [63:MAXPHYADDR] must be 0!!! as 'lower_half_ptr' is supposed to
// be a physical address already, it should be the case
_cr3(lower_half_ptr);
// enable PAE in cr4
set_cr4(get_cr4() | CR0_PG_BIT);

146 kernel/memory/physical_allocator.c

@@ -1,10 +1,12 @@
#include <stdint.h>
#include <stddef.h>
#include <stivale2.h>
#include <stdbool.h>
#include "../klib/string.h"
#include "../klib/sprintf.h"
#include "../debug/assert.h"
#include "../debug/panic.h"
#include "physical_allocator.h"
/**
* memory modeling:
@@ -78,7 +80,7 @@ static struct memory_range memory_ranges_buffer[512];
static unsigned n_ranges = 0;
static unsigned n_available = 0;
static unsigned total_available_pages = 0;
// lists of memory ranges: sorted by biggest free range
static struct memory_range* mr_lists[4] = {0};
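For context, a sketch of how these tiered free lists fit together; the memory_range field names are assumptions inferred from this diff, not the repository's exact definitions.

#include <stdint.h>
#include <stddef.h>

// one physical memory range; its first page holds an MR_header
// with the hierarchical bitmaps and free-block counters
struct memory_range {
    struct memory_range* next;  // intrusive singly linked list
    void*  base;                // base address of the range
    size_t length;              // length in 4 KiB pages
};

// mr_lists[i] chains the ranges that still have free blocks at
// granularity i (1, 4, 8 and 16 pages respectively)
static struct memory_range* mr_lists[4];

// O(1) insertion: push the range at the head of the chosen list
static void mr_list_push(unsigned list_id, struct memory_range* r) {
    r->next = mr_lists[list_id];
    mr_lists[list_id] = r;
}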
@@ -95,9 +97,11 @@ static void init_memory_range(struct memory_range* range, uint64_t addr, size_t
// we use one page per region for the header
header->available[0] = (length-1);
header->available[1] = (length-1) >> 2;
header->available[2] = (length-1) >> 3;
header->available[3] = (length-1) >> 4;
// we have to ceil to take the last few pages into account
header->available[1] = (length-1 + 4-1) >> 2;
header->available[2] = (length-1 + 8-1) >> 3;
header->available[3] = (length-1 + 16-1) >> 4;
// choose the right linked list to insert the element in
// then insert it at the beginning: so the insertion is O(1)
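The rounding fix above is the usual integer ceiling-division identity: adding divisor-1 before dividing rounds any remainder up. A standalone check:

#include <assert.h>
#include <stddef.h>

static size_t ceil_div(size_t n, size_t d) {
    return (n + d - 1) / d;
}

int main(void) {
    // 17 pages at a 16-page granularity still occupy a second,
    // partially filled block...
    assert(ceil_div(17, 16) == 2);
    // ...which the plain shift (floor division) used to drop
    assert((17 >> 4) == 1);
    return 0;
}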
@@ -135,6 +139,7 @@ void init_physical_allocator(const struct stivale2_struct_tag_memmap* memmap) {
uint64_t base = e.base;
while(size > 0) {
size_t s; // the actual size of the current range
@@ -158,10 +163,10 @@ void init_physical_allocator(const struct stivale2_struct_tag_memmap* memmap) {
}
n_ranges = j;
n_available = total_pages - n_ranges;
total_available_pages = total_pages - n_ranges;
kprintf("initializeed %u MB of memory, %u ranges\n",
(total_pages - n_ranges) / 256, n_ranges);
kprintf("initializeed %u MB of memory, %u ranges, %u pages\n",
(total_pages - n_ranges) / 256, n_ranges, (total_pages - n_ranges));
}
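The / 256 in that message converts a 4 KiB page count into MiB, since one MiB holds 256 such pages. A quick check:

#include <assert.h>

int main(void) {
    assert((1024 * 1024) / 4096 == 256);  // 4 KiB pages per MiB
    // e.g. the 370864 pages allocated in init_paging()'s test
    // amount to roughly 1448 MiB
    assert(370864 / 256 == 1448);
    return 0;
}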
@@ -184,12 +189,16 @@ static struct memory_range* getMR(size_t requested_size, unsigned* max_block_siz
continue;
*max_block_size_id = i;
// return the first element, as it is in one of the memory range lists,
// we are sure that it is not full.
return range;
}
assert(0);
// it shouldn't happen
panic("internal physical allocator error");
__builtin_unreachable();
}
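A hypothetical reconstruction of getMR()'s selection logic, inferred from this hunk alone: skip the empty lists, return the head of the first usable one, and treat an empty result as an internal error, since the free-page accounting in physalloc() guarantees a fit exists.

static struct memory_range* get_range(size_t requested_pages,
                                      unsigned* max_block_size_id) {
    (void)requested_pages;  // how the tier is weighed against the
                            // request size is elided in this sketch
    for (int i = 3; i >= 0; i--) {
        struct memory_range* range = mr_lists[i];
        if (range == NULL)
            continue;               // no usable range at this tier
        *max_block_size_id = (unsigned)i;
        // the list head is never full: full ranges are evicted
        // by move_region_if_full() right after each allocation
        return range;
    }
    // unreachable as long as total_available_pages was checked
    panic("internal physical allocator error");
    __builtin_unreachable();
}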
@@ -247,7 +256,8 @@ static void alloc_page_bitmaps(struct MR_header* header, unsigned page) {
// calculate the new number of free blocks for all the sizes
//if((header->bitmap_level0[page/8] & mask_level0) == 0)
assert((header->bitmap_level0[page/8] & mask_level0) == 0);
header->available[0]--;
if((header->bitmap_level1[page/8/4] & mask_level1) == 0)
header->available[1]--;
@@ -261,7 +271,6 @@ static void alloc_page_bitmaps(struct MR_header* header, unsigned page) {
assert(header->available[1] != 0xffffffff);
assert(header->available[0] != 0xffffffff);
kprintf("alloc_page_bitmaps - %ld\n", header->available[3]);
// actually set the bits
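The counter updates follow one rule: allocating a page always consumes a level-0 block, but a coarser block (4, 8 or 16 pages) is only consumed the first time one of its pages is taken, i.e. while its bit is still clear. A sketch of the level-1 case; the struct layout and the bitmap indexing are assumptions read off the masks above.

#include <stdint.h>

struct MR_header_sketch {
    uint32_t available[4];         // free blocks per granularity
    uint8_t  bitmap_level1[1024];  // sizes are placeholders
};

// each level-1 bit covers 4 pages, so one byte covers 32 pages
static void account_level1(struct MR_header_sketch* h, unsigned page) {
    uint8_t mask = 1 << ((page / 4) % 8);
    if ((h->bitmap_level1[page / 4 / 8] & mask) == 0)
        h->available[1]--;   // the 4-page block just became dirty
    h->bitmap_level1[page / 4 / 8] |= mask;
    // the 8- and 16-page levels repeat the same pattern
}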
@@ -315,31 +324,39 @@ static int move_region_if_full(struct memory_range* range,
// search for the number of available blocks
assert(list_id < 4);
unsigned available = header->available[list_id];
int return_value = 0;
kprintf("list_id %d, available %d - %d %d %d %d\r", list_id,available, header->available[0], header->available[1],header->available[2], header->available[3]);
// we can move down multiple times
while(1) {
unsigned available = header->available[list_id];
if(available)
return 0;
// the region isn't full
else {
// remove the range from the list
mr_lists[list_id] = range->next;
if(available)
return return_value;
// the region isn't full
else {
// remove the range from the list
mr_lists[list_id] = range->next;
if(list_id != 0) {
// if it isn't already
// on the weakest list,
// put it in the weaker list
range->next = mr_lists[list_id-1];
mr_lists[list_id-1] = range;
}
// else, just remove it.
// it won't be accessible
// and won't slow down the allocation
// of other segments
return_value = 1;
return 1;
if(list_id != 0) {
list_id--;
// if it isn't already
// on the weakest list,
// put it in the weaker list
range->next = mr_lists[list_id];
mr_lists[list_id] = range;
}
else
return 1;
// else, just remove it.
// it won't be accessible
// and won't slow down the allocation
// of other segments
}
}
}
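The rewritten loop lets one call demote a range several tiers at once. A compact equivalent of the control flow, assuming (as the code does) that range is the head of its current list:

static int demote_if_full(struct memory_range* range,
                          struct MR_header* header,
                          unsigned list_id) {
    int demoted = 0;
    for (;;) {
        if (header->available[list_id])
            return demoted;    // still usable at this granularity
        // unlink the (head) range from the exhausted list
        mr_lists[list_id] = range->next;
        if (list_id == 0)
            return 1;          // no free pages at all: drop it
        // push onto the next weaker list and re-check, since it
        // may be exhausted there as well
        list_id--;
        range->next = mr_lists[list_id];
        mr_lists[list_id] = range;
        demoted = 1;
    }
}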
@@ -352,12 +369,21 @@ static int move_region_if_full(struct memory_range* range,
void physalloc(size_t size, void* virtual_addr, PHYSALLOC_CALLBACK callback) {
assert_aligned(virtual_addr, 0x1000);
if(total_available_pages < size) {
char buff[128];
sprintf(buff, "OUT OF MEMORY: "
"physalloc(size=%u,virtuall_addr=0x%lx,...)",
size, virtual_addr);
panic(buff);
}
total_available_pages -= size;
// loop through the MRs
while(size > 0) {
unsigned max_block_size_id = 0;
struct memory_range* range = getMR(size, &max_block_size_id);
unsigned memory_range_length = range->length;
const unsigned memory_range_length = range->length;
struct MR_header* header = range->base;
@@ -374,12 +400,14 @@ void physalloc(size_t size, void* virtual_addr, PHYSALLOC_CALLBACK callback) {
// iterate over at least enough bits
unsigned current_block = 0;
unsigned total_blocks = memory_range_length / granularity;
// there is a last block that handles the very last few pages
const unsigned total_blocks = (memory_range_length+granularity-1) / granularity;
// loop through the selected level bitmap
// to find big regions
while(current_block < total_blocks) {
bitmap_chunk = *(bitmap_ptr++);
uint64_t free_chunks_bitmap = ~bitmap_chunk;
@@ -387,12 +415,14 @@ void physalloc(size_t size, void* virtual_addr, PHYSALLOC_CALLBACK callback) {
if(free_chunks_bitmap == 0) // the whole chunk is full
current_block += 64;
else {
// loop up to 64 iterations
int i = 0;
// current block of the following loop
unsigned local_current_block = current_block;
unsigned local_current_block = current_block;
// extract the 0 bit positions from the bitmap chunk
// we know for sure that at least one bit is unset
for(;;i++) {
@@ -400,17 +430,17 @@ void physalloc(size_t size, void* virtual_addr, PHYSALLOC_CALLBACK callback) {
unsigned ffs = __builtin_ffsll(free_chunks_bitmap);
// if none of the bits are set,
// lets check the 64 next ones
// if none of the bits are set,
// lets check the 64 next ones
if(ffs == 0) {
current_block += 64;
break;
}
// see https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html
// see https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html
unsigned bit_pos = ffs - 1;
local_current_block += bit_pos;
local_current_block = current_block + bit_pos;
// this 1 is not part of the bitmap
if(local_current_block >= total_blocks) {
@@ -420,42 +450,55 @@ void physalloc(size_t size, void* virtual_addr, PHYSALLOC_CALLBACK callback) {
// clear the bit instead of redownloading the chunk
kprintf("%lx / %lx\n", free_chunks_bitmap, total_blocks);
free_chunks_bitmap &= ~(1llu << bit_pos);
// as the granularity isn't always 1, lets find the
// index the level0 bitmap of the found free region
unsigned curr_page = 1 + (current_block+bit_pos) * granularity;
unsigned curr_page = (local_current_block) * granularity;
// if we are done (size == 0 or curr_page == range->length)
// then exit the whole region travelling loop
bool done_travelling = false;
// loop through the region inside the lvl0 map
for(unsigned j = 0; j < granularity; j++) {
void* target_address = range->base + curr_page * 0x1000;
void* target_address = range->base + (curr_page+1) * 0x1000;
callback((uint64_t)target_address, (uint64_t)virtual_addr, 0x1000);
alloc_page_bitmaps(header, curr_page);
size--;
curr_page++;
virtual_addr += 0x1000;
if(size == 0)
goto _break_loop;
if(curr_page >= range->length || size == 0) {
done_travelling = true;
break;
// break the main traveling loop
}
}
if(done_travelling) {
current_block = local_current_block;
// update the current_block
// variable before breaking
break;
}
}
_break_loop:;
// now that we allocated stuff on the segment,
// check if it became full
if(move_region_if_full(range, header, max_block_size_id))
goto _pick_another_region;
if(move_region_if_full(range, header, max_block_size_id)) {
break;
// if size==0,
}
if(size == 0)
return;
}
}
_pick_another_region:;
}
}
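The scan inverts each 64-bit bitmap chunk so free blocks become set bits, then pops them one at a time with __builtin_ffsll (the GCC builtin the comment above links to). A self-contained sketch of that inner mechanism:

#include <stdint.h>
#include <stdio.h>

// report every free block index encoded in one bitmap chunk
static void scan_chunk(uint64_t bitmap_chunk, unsigned base_block) {
    uint64_t free_bits = ~bitmap_chunk;     // 1 = free block
    while (free_bits) {
        // ffsll returns 1 + index of the lowest set bit, 0 if none
        unsigned bit_pos = __builtin_ffsll(free_bits) - 1;
        printf("free block: %u\n", base_block + bit_pos);
        free_bits &= ~(1llu << bit_pos);    // consume the bit
    }
}

int main(void) {
    // everything allocated except blocks 0 and 2
    scan_chunk(~(uint64_t)0x5, 0);
    return 0;
}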
@@ -481,8 +524,13 @@ static const struct memory_range* get_memory_range(const void* addr) {
// A.base < addr < B.base
while(addr > A->base + A->length * 0x1000) {
unsigned c = (a+b) >> 1;
// start fetching for the next iteration
__builtin_prefetch(memory_ranges_buffer + ((a + c)>>1));
__builtin_prefetch(memory_ranges_buffer + ((c + b)>>1));
const struct memory_range* restrict C = &memory_ranges_buffer[c];
if(addr < C->base)
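The prefetch trick: before the comparison against the midpoint C decides which half to descend into, both candidate next midpoints are requested, so the next pivot is already travelling into cache whichever way the branch goes. A sketch reusing the memory_range layout assumed earlier:

#include <stdint.h>
#include <stddef.h>

static const struct memory_range*
find_range(const struct memory_range* ranges, unsigned n,
           const void* addr) {
    unsigned a = 0, b = n;
    while (a < b) {
        unsigned c = (a + b) >> 1;
        __builtin_prefetch(&ranges[(a + c) >> 1]); // pivot if we go left
        __builtin_prefetch(&ranges[(c + b) >> 1]); // pivot if we go right
        if ((uintptr_t)addr < (uintptr_t)ranges[c].base)
            b = c;       // descend into the lower half
        else
            a = c + 1;   // descend into the upper half
    }
    // a is now the count of ranges whose base is <= addr
    return a ? &ranges[a - 1] : NULL;
}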
@@ -508,4 +556,6 @@ void physfree(void* physical_page_addr) {
unsigned position = ((uint64_t)physical_page_addr - (uint64_t)range->base) / 0x1000;
free_page_bitmaps((struct MR_header *)range->base, position);
total_available_pages++;
}
