Mathieu Serandour · 1 year ago
commit 6e07eddcae
  1. Makefile (2)
  2. kernel/debug/assert.h (3)
  3. kernel/entry.c (3)
  4. kernel/memory/paging.c (25)
  5. kernel/memory/paging.h (2)
  6. kernel/memory/physical_allocator.c (370)
  7. kernel/memory/physical_allocator.h (10)

Makefile (2)

@@ -9,7 +9,7 @@ USED_LOOPBACK := /dev/loop6
LIMINE_INSTALL := ./limine-bootloader/limine-install-linux-x86_64
QEMU_PATH := qemu-system-x86_64
QEMU_ARGS := -monitor stdio -bios /usr/share/ovmf/OVMF.fd -m 1024 -vga std -no-reboot
QEMU_ARGS := -monitor stdio -bios /usr/share/ovmf/OVMF.fd -m 114 -vga std -no-reboot
run: all
$(QEMU_PATH) $(QEMU_ARGS) -drive format=raw,file=$(HDD_FILE),id=disk,if=none \

kernel/debug/assert.h (3)

@@ -7,11 +7,12 @@ void __assert(const char* __restrict__ expression,
const int line);
#define assert(EX) (void)((EX) || (__assert (#EX, __FILE__, __LINE__),0))
#define assert_aligned(ADDR, ALIGNMENT) assert(((uint64_t)ADDR & (ALIGNMENT-1)) == 0)
#else
#define assert(EX)
#endif
#define static_assert(EX) _Static_assert(EX, #EX)
#define static_assert_equals(EX1,EX2) _Static_assert(EX1==EX2, #EX1 " != " #EX2)
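Both new macros are exercised later in this commit; a minimal usage sketch:

// compile-time: the memory range header must stay exactly one 4 KiB page
static_assert_equals(sizeof(struct MR_header), 4096);
// run-time: a physical page address must be 4 KiB aligned
assert_aligned(physical_page_addr, 0x1000);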

kernel/entry.c (3)

@@ -14,6 +14,7 @@
#include "int/idt.h"
#include "memory/physical_allocator.h"
#include "memory/paging.h"
#define KERNEL_STACK_SIZE 8192
@@ -194,7 +195,7 @@ void _start(struct stivale2_struct *stivale2_struct) {
kputs("DONE\n");
init_physical_allocator(memmap_tag);
init_paging();
apic_setup_clock();

kernel/memory/paging.c (25)

@@ -1,6 +1,8 @@
#include "paging.h"
#include "../debug/assert.h"
#include <stivale2.h>
#include "../debug/assert.h"
#include "physical_allocator.h"
#include "paging.h"
extern uint64_t get_cr0(void);
extern void set_cr0(uint64_t cr0);
@@ -64,14 +66,23 @@ enum stivale2_mmap_type : uint32_t {
*/
void init_paging(const struct stivale2_struct_tag_memmap* memory_map) {
void physical_allocator_callback(uint64_t physical_address,
uint64_t virtual_address,
size_t size) {
for(unsigned i = 0; i < memory_map->entries; i++) {
const struct stivale2_mmap_entry* e = &memory_map->memmap[i];
//kprintf("%lx -> %lx\r", virtual_address, physical_address);
}
void init_paging(void) {
kprintf("%0x16x - %0x16x - %d\n", e->base, e->base+e->length, e->type);
}
physalloc(370864, 0x0000000000, physical_allocator_callback);
kprintf("---------------------------------------------------\n");
//physalloc(17, 0xfffff800000000, physical_allocator_callback);
while(1);
// get the physical address of the pml4 table
uint64_t lower_half_ptr = ~0xffffffff80000000llu | (uint64_t)&pml4_table;
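For scale: the test call above requests 370864 pages × 4 KiB ≈ 1449 MiB, more RAM than either -m value in the Makefile provides, so it presumably walks the allocator through every available range before the while(1) halt.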

kernel/memory/paging.h (2)

@@ -48,7 +48,7 @@ struct stivale2_struct_tag_memmap;
* enable PML4 4K paging
*
*/
void init_paging(const struct stivale2_struct_tag_memmap *);
void init_paging(void);
/**
* map pages from a given physical address to a given virtual address
* map 'count' continuous pages

kernel/memory/physical_allocator.c (370)

@@ -1,9 +1,11 @@
#include <stdint.h>
#include <stddef.h>
#include <stivale2.h>
#include "../debug/assert.h"
#include "../klib/string.h"
#include "../klib/sprintf.h"
#include "../debug/assert.h"
#include "physical_allocator.h"
/**
* memory modeling:
* | MR1 | MR2 | //// | MR3 | ....
@@ -65,22 +67,28 @@ struct MR_header {
uint8_t bitmap_level0[2048];
};
// number of ranges considered
static_assert_equals(sizeof(struct MR_header), 4096);
// 512 x 64MB regions: up to 32 GB of RAM
struct memory_range buffer[512];
// this buffer is sorted by base address
static struct memory_range memory_ranges_buffer[512];
static unsigned n_ranges = 0;
static unsigned n_available = 0;
// lists of memory ranges: sorted by biggest free range
struct memory_range* mr_lists[4] = {0};
static struct memory_range* mr_lists[4] = {0};
// init memory range as a free range, ready to be allocated
static void init_memory_range(struct memory_range* range, uint64_t addr, size_t length) {
// init the linked list's structure
range->base = addr;
range->length = length;
range->base = (void *)addr;
range->length = length-1;
// init the MR header
struct MR_header* header = (struct MR_header *)addr;
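As a sanity check on the sizes introduced here, the figures below follow from this commit (the 64 MiB range size, the 1152 and 2944 byte offsets used by the memset further down, and the per-level granularities from select_bitmap):

// one memory range = 64 MiB = 16384 pages of 4 KiB
// level0:  1 page/bit  -> 16384 bits = 2048 bytes
// level1:  4 pages/bit ->  4096 bits =  512 bytes
// level2:  8 pages/bit ->  2048 bits =  256 bytes
// level3: 16 pages/bit ->  1024 bits =  128 bytes
// 2048 + 512 + 256 + 128 = 2944 bytes of bitmaps at offset 1152,
// and 1152 + 2944 = 4096 = sizeof(struct MR_header)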
@@ -94,7 +102,7 @@ static void init_memory_range(struct memory_range* range, uint64_t addr, size_t
// choose the right linked list to insert the element in
// then insert it at the beginning: so the insertion is O(1)
for(int i = 3; i >= 0; i--) {
if(header->available[3] == 0)
if(header->available[i] == 0)
continue;
range->next = mr_lists[i];
@@ -104,7 +112,7 @@ static void init_memory_range(struct memory_range* range, uint64_t addr, size_t
// zero all the bitmaps
memset(((uint8_t*)header)+1152, 0, 2048);
memset(((uint8_t*)header)+1152, 0, 2048+512+256+128);
}
@@ -120,8 +128,7 @@ void init_physical_allocator(const struct stivale2_struct_tag_memmap* memmap) {
// don't take kernel & modules or ACPI-reclaimable
// memory ranges into account
if(e.type == STIVALE2_MMAP_USABLE ||
e.type == STIVALE2_MMAP_BOOTLOADER_RECLAIMABLE ||
e.type == STIVALE2_MMAP_FRAMEBUFFER) {
e.type == STIVALE2_MMAP_BOOTLOADER_RECLAIMABLE) {
// round the length up to a whole number of pages
size_t size = (e.length + 4096 - 1) / 4096;
@@ -139,11 +146,8 @@ void init_physical_allocator(const struct stivale2_struct_tag_memmap* memmap) {
else
s = size;
struct memory_range* range = &buffer[j++];
struct memory_range* range = &memory_ranges_buffer[j++];
init_memory_range(range, base, s);
kprintf("%l16x: %d pages\n", range->base, range->length);
total_pages += s;
size -= s;
@@ -152,28 +156,356 @@ void init_physical_allocator(const struct stivale2_struct_tag_memmap* memmap) {
}
}
}
kprintf("detected %u MB of memory, %u usable\n", total_pages / 256, (total_pages - j) / 256);
n_ranges = j;
n_available = total_pages - n_ranges;
kprintf("initializeed %u MB of memory, %u ranges\n",
(total_pages - n_ranges) / 256, n_ranges);
}
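A note on the units in the summary above (inferred from the 4 KiB page size, not stated explicitly in the commit):

// 256 pages × 4 KiB = 1 MiB, so total_pages / 256 converts pages to MiB;
// each range gives up one page to its MR_header, hence
// n_available = total_pages - n_ranges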
// return a MR that fits best the requested size
// it may still not fit the request if no MR can hold it
static void getMR(size_t requested_size) {
// max_block_size_id: index of the linked list in which we found the MR
// the returned range is always the first element of the max_block_size_id-th list
static struct memory_range* getMR(size_t requested_size, unsigned* max_block_size_id) {
// we don't use the size yet
(void) requested_size;
// worst fit: always return a memory range that is known to
// still contain a large contiguous free block
for(int i = 3; i >= 0; i--) {
struct memory_range* range = mr_lists[i];
if(range == NULL)
continue;
*max_block_size_id = i;
// return the first element, as it is in one of the memory range lists,
// we are sure that it is not full.
return range;
}
assert(0);
}
/**
* @brief select the bitmap to use to allocate a block
* this function decodes the max_block_size_id value
* to select the right (ith) bitmap in the structure
*
* the function also gives the granularity of the returned bitmap
*
* @param header the header of the memory range structure
* @param max_block_size_id the id of the linked list in which the memory
* range has been found
* @param granularity [output] granularity of the selected bitmap
* @return void*
*/
static void* select_bitmap(struct MR_header* header,
unsigned max_block_size_id,
unsigned* granularity) {
switch (max_block_size_id)
{
case 0:
*granularity = 1;
return header->bitmap_level0;
break;
case 1:
*granularity = 4;
return header->bitmap_level1;
break;
case 2:
*granularity = 8;
return header->bitmap_level2;
break;
case 3:
*granularity = 16;
return header->bitmap_level3;
break;
default:
assert(0);
return NULL;
}
}
// modifies the bitmaps to allocate
// the ith page in the 64 MB segment
static void alloc_page_bitmaps(struct MR_header* header, unsigned page) {
// process on bytes: calculate the right masks
uint8_t mask_level0 = 1 << page % 8,
mask_level1 = 1 << (page / 4) % 8,
mask_level2 = 1 << (page / 8) % 8,
mask_level3 = 1 << (page / 16) % 8;
// calculate the new number of free blocks for all the sizes
//if((header->bitmap_level0[page/8] & mask_level0) == 0)
header->available[0]--;
if((header->bitmap_level1[page/8/4] & mask_level1) == 0)
header->available[1]--;
if((header->bitmap_level2[page/8/8] & mask_level2) == 0)
header->available[2]--;
if((header->bitmap_level3[page/8/16] & mask_level3) == 0)
header->available[3]--;
assert(header->available[3] != 0xffffffff);
assert(header->available[2] != 0xffffffff);
assert(header->available[1] != 0xffffffff);
assert(header->available[0] != 0xffffffff);
kprintf("alloc_page_bitmaps - %ld\n", header->available[3]);
// actually set the bits
header->bitmap_level0[page/8] |= mask_level0;
header->bitmap_level1[page/8/4] |= mask_level1;
header->bitmap_level2[page/8/8] |= mask_level2;
header->bitmap_level3[page/8/16] |= mask_level3;
}
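A worked example of the index arithmetic above, with page 37 chosen arbitrarily:

// page 37:
//   level0: byte 37/8    = 4, bit 37%8      = 5
//   level1: byte 37/8/4  = 1, bit (37/4)%8  = 1   (4-page block 9)
//   level2: byte 37/8/8  = 0, bit (37/8)%8  = 4   (8-page block 4)
//   level3: byte 37/8/16 = 0, bit (37/16)%8 = 2   (16-page block 2)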
// modifies the bitmaps to free
// the ith page in the 64 MB segment
static void free_page_bitmaps(struct MR_header* header, unsigned page) {
// process on bytes: calculate the right masks
uint8_t mask_level0 = 1 << page % 8,
mask_level1 = 1 << (page / 4) % 8,
mask_level2 = 1 << (page / 8) % 8,
mask_level3 = 1 << (page / 16) % 8;
// unset the corresponding bits
header->bitmap_level0[page/8] &= ~mask_level0;
header->bitmap_level1[page/8/4] &= ~mask_level1;
header->bitmap_level2[page/8/8] &= ~mask_level2;
header->bitmap_level3[page/8/16] &= ~mask_level3;
// calculate the new number of free blocks for all the sizes
//if((header->bitmap_level0[page/8] & mask_level0) == 0)
header->available[0]++;
if((header->bitmap_level1[page/8/4] & mask_level1) == 0)
header->available[1]++;
if((header->bitmap_level2[page/8/8] & mask_level2) == 0)
header->available[2]++;
if((header->bitmap_level3[page/8/16] & mask_level3) == 0)
header->available[3]++;
}
// move the region from one list to another
// return 1 if the region is full
/// !!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!
/// !!! THE RANGE MUST BE ON THE !!!
/// !!! TOP OF ITS LIST !!!
/// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//
static int move_region_if_full(struct memory_range* range,
const struct MR_header* restrict header,
unsigned list_id) {
// search for the number of available blocks
assert(list_id < 4);
unsigned available = header->available[list_id];
kprintf("list_id %d, available %d - %d %d %d %d\r", list_id,available, header->available[0], header->available[1],header->available[2], header->available[3]);
if(available)
return 0;
// the region isn't full
else {
// remove the range from the list
mr_lists[list_id] = range->next;
if(list_id != 0) {
// if it isn't already on the weakest list,
// move it down to the next weaker list
range->next = mr_lists[list_id-1];
mr_lists[list_id-1] = range;
}
// else, just remove it.
// it won't be accessible
// and won't slow down the allocation
// of other segments
return 1;
}
}
/**
* @brief allocate
*
* @param size in 4K pages
* @return void* the physical allocated address
*/
void* physalloc(size_t size) {
void physalloc(size_t size, void* virtual_addr, PHYSALLOC_CALLBACK callback) {
assert_aligned(virtual_addr, 0x1000);
// loop through the MRs
while(size > 0) {
unsigned max_block_size_id = 0;
struct memory_range* range = getMR(size, &max_block_size_id);
unsigned memory_range_length = range->length;
struct MR_header* header = range->base;
// use the bitmap that maximizes the granularity,
// in accordance with the getMR() result
// maximizing the granularity is essential to reduce the time spent
// traversing the bitmap
unsigned granularity = 0;
uint64_t* bitmap_ptr = select_bitmap(header, max_block_size_id, &granularity);
// walk the bitmap in 64-bit chunks
uint64_t bitmap_chunk;
// iterate over at least enough bits
unsigned current_block = 0;
unsigned total_blocks = memory_range_length / granularity;
// loop through the selected level bitmap
// to find big regions
while(current_block < total_blocks) {
bitmap_chunk = *(bitmap_ptr++);
uint64_t free_chunks_bitmap = ~bitmap_chunk;
// skip the whole chunk if every bit of bitmap_chunk is set (no free block in it)
if(free_chunks_bitmap == 0) // the whole chunk is full
current_block += 64;
else {
// loop up to 64 iterations
int i = 0;
// current block of the following loop
unsigned local_current_block = current_block;
// extract the 0 bit positions from the bitmap chunk
// we know for sure that at least one bit is unset
for(;;i++) {
unsigned ffs = __builtin_ffsll(free_chunks_bitmap);
// if no free bit remains in this chunk,
// move on to the next 64 blocks
if(ffs == 0) {
current_block += 64;
break;
}
// see https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html
unsigned bit_pos = ffs - 1;
local_current_block += bit_pos;
// this set bit lies beyond the end of the meaningful bitmap
if(local_current_block >= total_blocks) {
current_block = local_current_block;
break;
}
// clear the bit locally instead of re-reading the chunk from memory
kprintf("%lx / %lx\n", free_chunks_bitmap, total_blocks);
free_chunks_bitmap &= ~(1llu << bit_pos);
// as the granularity isn't always 1, let's find the
// index in the level0 bitmap of the free region we found
unsigned curr_page = 1 + (current_block+bit_pos) * granularity;
// loop through the region inside the lvl0 map
for(unsigned j = 0; j < granularity; j++) {
void* target_address = range->base + curr_page * 0x1000;
callback((uint64_t)target_address, (uint64_t)virtual_addr, 0x1000);
alloc_page_bitmaps(header, curr_page);
size--;
curr_page++;
virtual_addr += 0x1000;
if(size == 0)
goto _break_loop;
// break the main traveling loop
}
}
_break_loop:;
// now that we allocated pages in this segment,
// check whether it became full
if(move_region_if_full(range, header, max_block_size_id))
goto _pick_another_region;
// if size==0,
if(size == 0)
return;
}
}
_pick_another_region:;
}
}
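A minimal, self-contained sketch of the chunk scan used above, relying only on the GCC builtin (__builtin_ffsll returns 1 + the index of the lowest set bit, or 0 if no bit is set):

#include <stdint.h>

// scan one 64-bit bitmap word for free (0) pages
static void scan_chunk(uint64_t bitmap_chunk) {
    uint64_t free_bits = ~bitmap_chunk;              // 1 = free block
    while(free_bits != 0) {
        unsigned bit_pos = __builtin_ffsll(free_bits) - 1;
        // ... allocate the block at index bit_pos ...
        free_bits &= ~(1llu << bit_pos);             // consume the bit, keep scanning
    }
}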
// return the memory range in which the given page is
// contained
static const struct memory_range* get_memory_range(const void* addr) {
// dichotomy (binary search)
unsigned int a = 0,
b = n_ranges-1;
const struct memory_range* restrict A = &memory_ranges_buffer[0],
* restrict B = &memory_ranges_buffer[n_ranges-1];
// check the initial bounds
if(addr > B->base) {
// length is in pages
assert(addr < B->base+B->length * 0x1000);
return B;
}
assert(addr > A->base);
// A.base < addr < B.base
while(addr > A->base + A->length * 0x1000) {
unsigned c = (a+b) >> 1;
const struct memory_range* restrict C = &memory_ranges_buffer[c];
if(addr < C->base)
B = C;
else
A = C;
}
// the address shouldn't be A->base (which is the header of the segment)
assert(addr != A->base);
return A;
}
/**
* size is in pages
*
*/
void physfree(void* virtual_addr, size_t size) {
(void) (virtual_addr + size);
}
void physfree(void* physical_page_addr) {
assert_aligned(physical_page_addr, 0x1000);
const struct memory_range* range = get_memory_range(physical_page_addr);
unsigned position = ((uint64_t)physical_page_addr - (uint64_t)range->base) / 0x1000;
free_page_bitmaps((struct MR_header *)range->base, position);
}

kernel/memory/physical_allocator.h (10)

@@ -1,11 +1,17 @@
#pragma once
#include <stddef.h>
#include <stdint.h>
struct stivale2_struct_tag_memmap;
// init the physical allocator with the stivale2 memory map
void init_physical_allocator(const struct stivale2_struct_tag_memmap* memmap);
void* physalloc(size_t size);
void physfree(void* virtual_addr, size_t size);
typedef void (*PHYSALLOC_CALLBACK)(uint64_t physical_address, uint64_t virtual_address, size_t size);
void physalloc(size_t size, void* virtual_addr, PHYSALLOC_CALLBACK callback);
void physfree(void* physical_page_addr);
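A hedged usage sketch of the new callback-based interface; the callback body, include paths and the virtual address are illustrative only (the address mirrors the commented-out test in init_paging()):

#include "memory/physical_allocator.h"
#include "klib/sprintf.h"

// called once per allocated page: receives the physical page and the
// virtual address it is meant to back; here it only logs the pair,
// real paging code would map it instead
static void on_allocated_page(uint64_t physical_address,
                              uint64_t virtual_address,
                              size_t size) {
    (void) size;                                    // always 0x1000 here
    kprintf("%lx -> %lx\n", virtual_address, physical_address);
}

static void physalloc_smoke_test(void) {
    // 17 pages backing the virtual range starting at 0xfffff800000000
    physalloc(17, (void*)0xfffff800000000, on_allocated_page);
}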