
almost done with the pmm

master
Mathieu Serandour, 1 year ago
parent commit 6c7928c99a
  1. kernel/memory/paging.c (445 lines changed)
  2. kernel/memory/paging.h (73 lines changed)
  3. kernel/memory/physical_allocator.c (61 lines changed)
  4. kernel/memory/physical_allocator.h (33 lines changed)
  5. kernel/memory/vmap.h (93 lines changed)

kernel/memory/paging.c (445 lines changed)

@@ -1,8 +1,13 @@
#include <stivale2.h>
#include "../debug/assert.h"
#include "../klib/string.h"
#include "physical_allocator.h"
#include "paging.h"
#include "vmap.h"
#include "../klib/sprintf.h"
#include "../debug/panic.h"
#include "../debug/dump.h"
extern uint64_t get_cr0(void);
extern void set_cr0(uint64_t cr0);
@@ -12,89 +17,445 @@ extern void _cr3(uint64_t cr3);
extern uint64_t get_cr4(void);
extern void set_cr4(uint64_t cr4);
#define CR0_PG_BIT (1lu << 31)
#define CR4_PAE_BIT (1lu << 5)
#define CR4_PCIDE (1lu << 17)
/**
* 4th level table (pt) entry
*/
-typedef const void* pte;
typedef void* pte;
/**
* 3rd level table (pd) entry
*/
-typedef const pte* pde;
typedef pte* pde;
/**
* 2nd level table (pdpt) entry
*
*/
-typedef const pde* pdpte;
typedef pde* pdpte;
/**
* 1st level table (pml4) entry
*/
-typedef const pdpte* pml4e;
typedef pdpte* pml4e;
#define __page __attribute__((aligned(4096)))
static pml4e pml4[512] __page = {0};
// entry 0: 0 -> 0x0000007fffffffff
static pdpte pdpt_low[512] __page = {0};
// entry 256: 0xffff800000000000 -> 0xffff807fffffffff
static pdpte pdpt_mid[512] __page = {0};
// entry 511: 0xffffff8000000000 -> 0xffffffffffffffff
static pdpte pdpt_high[512] __page = {0};
static_assert_equals(sizeof(pml4), 0x1000);
static_assert_equals(sizeof(pdpt_high), 0x1000);
static_assert_equals(sizeof(pdpt_mid), 0x1000);
static_assert_equals(sizeof(pdpt_low), 0x1000);
// the alloc_page_table function is allowed
// to realloc page tables on the fly;
// unset this flag to avoid nasty recursions
static int alloc_page_table_realloc = 1;
static void fill_page_table_allocator_buffer(size_t n);
// extract the offset of the page table
// from a virtual address
static uint32_t pt_offset(uint64_t virt) {
return (virt >> 12) & 0x1ff;
}
// extract the offset of the page directory
// from a virtual address
static uint32_t pd_offset(uint64_t virt) {
return (virt >> 21) & 0x1ff;
}
// extract the offset of the pdp
// from a virtual address
static uint32_t pdpt_offset(uint64_t virt) {
return (virt >> 30) & 0x1ff;
}
// extract the offset of the pml4
// from a virtual address
static uint32_t pml4_offset(uint64_t virt) {
return (virt >> 39) & 0x1ff;
}
// PWT: page level write-through
// PCD: page level cache disable
static void* create_table_entry(void* entry, unsigned flags) {
assert_aligned(entry, 0x1000);
return (void*)(flags |
(uint64_t)entry);
}
// extract the pointer from an entry of
// a table structure
static void* extract_pointer(void* c) {
return (void*)(0x000ffffffffff000llu & (uint64_t)c);
}
// the current page flags
//static unsigned current_page_flags;
pml4e pml4_table[512] __attribute__((aligned(4096))) = {0};
/*
struct stivale2_struct_tag_memmap {
struct stivale2_tag tag; // Identifier: 0x2187f79e8612de07
uint64_t entries; // Count of memory map entries
struct stivale2_mmap_entry memmap[]; // Array of memory map entries
};
struct stivale2_mmap_entry {
uint64_t base; // Physical address of base of the memory section
uint64_t length; // Length of the section
uint32_t type; // Type (described below)
uint32_t unused;
};
enum stivale2_mmap_type : uint32_t {
USABLE = 1,
RESERVED = 2,
ACPI_RECLAIMABLE = 3,
ACPI_NVS = 4,
BAD_MEMORY = 5,
BOOTLOADER_RECLAIMABLE = 0x1000,
KERNEL_AND_MODULES = 0x1001,
FRAMEBUFFER = 0x1002
};
static void physical_allocator_callback_kernel(
uint64_t physical_address,
uint64_t virtual_address,
size_t size) {
assert(is_kernel_data(virtual_address));
(void)(physical_address + virtual_address + size);
}
void physical_allocator_callback_user(
uint64_t physical_address,
uint64_t virtual_address,
size_t size) {
assert(is_user(virtual_address));
(void)(physical_address + virtual_address + size);
}
void physical_allocator_callback_mmio(
uint64_t physical_address,
uint64_t virtual_address,
size_t size) {
assert(is_mmio(virtual_address));
(void)(physical_address + virtual_address + size);
}
*/
// map the usable (and bootloader-reclaimable) physical memory to the translated region
static void map_physical_memory(const struct stivale2_struct_tag_memmap* memmap) {
// as we are not in a callback function,
// we can realloc page tables on the fly
alloc_page_table_realloc = 1;
for(unsigned i = 0; i < memmap->entries; i++) {
const struct stivale2_mmap_entry* e = &memmap->memmap[i];
if(e->type == STIVALE2_MMAP_USABLE || e->type == STIVALE2_MMAP_BOOTLOADER_RECLAIMABLE) {
// be inclusive!
uint64_t phys_addr = (uint64_t) (e->base / 0x1000) * 0x1000;
size_t size = (e->length + (e->base - phys_addr) + 0x0fff) / 0x1000;
if(size == 0)
continue;
uint64_t virtual_addr = translate_address(phys_addr);
map_pages(phys_addr, virtual_addr, size, PRESENT_ENTRY);
// use the allocator to allocate page tables
// to map its own data
}
}
void physical_allocator_callback(uint64_t physical_address,
uint64_t virtual_address,
size_t size) {
}
/*
static void map_allocator_data(void) {
const struct physical_allocator_data_page_entry* entries;
size_t size = 0;
void init_paging(void) {
// as we are not in a callback function,
// we can realloc page tables on the fly
alloc_page_table_realloc = 1;
entries = physical_allocator_data_pages(&size);
for(unsigned i = 0; i < size; i++) {
uint64_t phys_addr = (uint64_t) entries[i].physical_address;
uint64_t virtual_addr = TRANSLATED_PHYSICAL_MEMORY_BEGIN | phys_addr;
// get the physical address of the pml4 table
uint64_t lower_half_ptr = ~0xffffffff80000000llu | (uint64_t)&pml4_table;
map_pages(phys_addr, virtual_addr, 1, PRESENT_ENTRY);
// use the allocator to allocate page tables
// to map its own data
}
}
*/
// map the kernel to KERNEL_DATA_BEGIN | kernel_phys_base
static void map_kernel(const struct stivale2_struct_tag_memmap* memmap) {
// count the number of kernel entries
// suppose the first one is the .text section,
// the second one is rodata
// and the third is data+bss
int n = 0;
for(unsigned i = 0; i < memmap->entries; i++) {
const struct stivale2_mmap_entry* e = &memmap->memmap[i];
if(e->type == STIVALE2_MMAP_KERNEL_AND_MODULES) {
// floor the base to one page
uint64_t base = e->base & ~0x0fff;
uint64_t virtual_addr = base | KERNEL_DATA_BEGIN;
// ceil the size
size_t size = e->length + (e->base - base);
size = (size+0x0fff) / 0x1000;
unsigned flags = PRESENT_ENTRY;
switch (n++)
{
case 0:
/* .text */
flags |= PL_XD;
break;
case 1:
/* rodata */
flags |= PL_RW;
break;
default:
break;
}
//alloc the page table pages
// before doing any allocation
fill_page_table_allocator_buffer(64);
map_pages(base, virtual_addr, size, flags);
}
}
}
// return a non-zero value iff the entry is
// present
static int present_entry(void* entry) {
return (uint64_t)entry & PRESENT_ENTRY;
}
/*
function for debug purposes
static void print_struct(int level, void** table, uint64_t virt) {
uint64_t* addr = table;
//if(level > 1)
// return ;
while(1);
for(int i = 0; i < 512; i++) {
if(present_entry(addr[i])) {
uint64_t v = (virt << 9) | i;
for(int i = 0; i < level; i++)
kputs("-");
// Page level Write Through (PWT) 0
// Page level Cache Disable (PCD) 0
// [63:MAXPHYADDR] must be 0!!! as 'lower_half_ptr' is supposed to
// be a physical address already, it should be the case
_cr3(lower_half_ptr);
if(level == 3) {
kprintf(" %lx -> %lx\n", v << 12, extract_pointer(addr[i]));
}
else {
kputs("\n");
print_struct(level+1, extract_pointer(addr[i]), v);
}
}
}
}
*/
void init_paging(const struct stivale2_struct_tag_memmap* memmap) {
// init the highest paging structures
// no need to use the translate_address macro
// as we are still in the early memory configuration:
// memory is both identity mapped and translated,
// so *x = *translate_address(x)
pml4[0] = create_table_entry(
(void*)early_virtual_to_physical(pdpt_low),
PRESENT_ENTRY
);
pml4[256] = create_table_entry(
(void*)early_virtual_to_physical(pdpt_mid),
PRESENT_ENTRY | PL_US
);
// the high half memory is supervisor only
// so that no user can access it, even though the entry
// stays in the pml4!
pml4[511] = create_table_entry(
(void*)early_virtual_to_physical(pdpt_high),
PRESENT_ENTRY | PL_US
);
// map all the memory to 0xffff800000000000
map_physical_memory(memmap);
// every time we allocate a page table
// while allocating some memory,
// we need to set this back to 0
// in order to avoid awful recursion bugs
alloc_page_table_realloc = 0;
//print_struct(0, pml4, 0);
// map the kernel
map_kernel(memmap);
// get the physical address of the pml4 table
uint64_t pml4_lower_half_ptr = early_virtual_to_physical(pml4);
// enable PAE in cr4
set_cr4(get_cr4() | CR0_PG_BIT);
// disable PCIDE
set_cr4((get_cr4() | CR4_PAE_BIT) & ~CR4_PCIDE);
// enable the PG bit
set_cr0(get_cr0() | CR0_PG_BIT);
_cr3(pml4_lower_half_ptr);
*((uint64_t*)0xffff800080000000) = 40;
while ((1));
kprintf("hello from the future\n");
}
// buffer the allocation requests:
// asking for 16 pages per call is
// optimal, as it is the granularity
// of the highest level bitmap
static void* page_table_allocator_buffer[64];
static size_t page_table_allocator_buffer_size = 0;
// callback for the page allocator
static void page_table_allocator_callback(uint64_t phys_addr,
uint64_t virt_addr,
size_t size) {
(void)(size+virt_addr); // the size is always one page anyway
assert(phys_addr < 0x80000000);
page_table_allocator_buffer[page_table_allocator_buffer_size++] = (void*)phys_addr;
}
-void map_pages(uint64_t physical_addr, uint64_t virtual_addr, size_t count) {
-(void) (physical_addr + virtual_addr + count);
/*
static void zero_page_table_page(void* physical_address) {
assert_aligned(physical_address, 0x1000);
memset(translate_address(physical_address), 0, 0x1000);
}
*/
// fill the page table allocator buffer
static void fill_page_table_allocator_buffer(size_t n) {
assert(n <= 64);
int to_alloc = n - page_table_allocator_buffer_size;
if(to_alloc < 0)
return;
int old_size = page_table_allocator_buffer_size;
physalloc(to_alloc, 0, page_table_allocator_callback);
//for(unsigned i = old_size; i < n; i++)
// zero_page_table_page(page_table_allocator_buffer[i]);
page_table_allocator_buffer_size = n;
}
// return a newly allocated zeroed page
static void* alloc_page_table(void) {
if(! page_table_allocator_buffer_size) {
if(!alloc_page_table_realloc)
panic(
"alloc_page_table(): out of buffered pages, unable to allocate "
"page tables"
);
physalloc(16, 0, page_table_allocator_callback);
page_table_allocator_buffer_size = 16;
//for(int i = 0; i < 16; i++)
// zero_page_table_page(page_table_allocator_buffer[i]);
}
return page_table_allocator_buffer[--page_table_allocator_buffer_size];
}
static void* get_entry_or_allocate(void** restrict table, unsigned index) {
assert(index < 512);
uint64_t* virtual_addr_table = translate_address(table);
void* entry = virtual_addr_table[index];
if(!present_entry(entry)) {
uint64_t e = create_table_entry(
alloc_page_table(),
PRESENT_ENTRY);
return virtual_addr_table[index] = e;
}
else
return entry;
}
void map_pages(uint64_t physical_addr,
uint64_t virtual_addr,
size_t count,
unsigned flags) {
while(count > 0) {
// fetch table indexes
unsigned pml4i = pml4_offset(virtual_addr),
pdpti = pdpt_offset(virtual_addr),
pdi = pd_offset(virtual_addr),
pti = pt_offset(virtual_addr);
assert(pml4i == 0 || pml4i == 511 || pml4i == 256);
// those entries should exist
pml4e restrict pml4entry = extract_pointer(get_entry_or_allocate((void**)pml4, pml4i));
pdpte restrict pdptentry = extract_pointer(get_entry_or_allocate((void**)pml4entry, pdpti));
pde restrict pdentry = extract_pointer(get_entry_or_allocate((void**)pdptentry, pdi));
while(count > 0 && pti < 512) {
// create a new entry
uint64_t e = create_table_entry((void*)physical_addr,flags);
((void**)translate_address(pdentry))[pti] = e;
pti++;
count--;
physical_addr += 0x1000;
virtual_addr += 0x1000;
}
}
}
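As a quick cross-check of the index helpers and the pml4 layout above, the following standalone sketch (hosted C, compiled and run outside the kernel) repeats the same shift-and-mask arithmetic and verifies where the two high-half windows from vmap.h land. The helper bodies and the window addresses are taken from this commit; everything else is illustrative.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// same 9-bit index extraction as the static helpers in paging.c
static uint32_t pt_offset  (uint64_t virt) { return (virt >> 12) & 0x1ff; }
static uint32_t pd_offset  (uint64_t virt) { return (virt >> 21) & 0x1ff; }
static uint32_t pdpt_offset(uint64_t virt) { return (virt >> 30) & 0x1ff; }
static uint32_t pml4_offset(uint64_t virt) { return (virt >> 39) & 0x1ff; }

int main(void) {
    // 0xffff800000000000 is the start of the translated-memory window,
    // so it must select pml4 entry 256 (pdpt_mid) with all lower indexes 0
    uint64_t v = 0xffff800000000000llu;
    assert(pml4_offset(v) == 256 && pdpt_offset(v) == 0
           && pd_offset(v) == 0 && pt_offset(v) == 0);

    // 0xffffffff80000000 (KERNEL_DATA_BEGIN) selects pml4 entry 511 (pdpt_high)
    v = 0xffffffff80000000llu;
    printf("pml4=%u pdpt=%u pd=%u pt=%u\n",
           (unsigned)pml4_offset(v), (unsigned)pdpt_offset(v),
           (unsigned)pd_offset(v),   (unsigned)pt_offset(v));
    return 0;
}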

kernel/memory/paging.h (73 lines changed)

@@ -5,53 +5,44 @@
struct stivale2_struct_tag_memmap;
//11111111111
//00100010001
//11011101110
/**
* memory modeling:
* | MR1 | MR2 | //// | MR3 | ....
* MR = 64 MB memory range
*
* MR linked list entry: {
* MR_base, // base address of the MR
* MR_length, // length, in pages
* MR_next, // the next MR in the list, or null
* }
*
* 4 global linked lists:
* memory_list[i] = MR linked list entry *
* ML0: at least 1 64K contiguous available range (very fast allocation)
* ML1: at least 1 32K contiguous available range (fast allocation)
* ML2: at least 1 16K contiguous available range (medium allocation)
* ML3: at least 1 4K contiguous available range (slow allocation)
*
*
* MR layout:
* | ----------------------------- 4K HEADER --------------------------- | BLOCKS |
* | 1152 header | 128 bitmap3 | 256 bitmap2 | 512 bitmap1 | 2K bitmap0 | 64 MB - 4K pages |
*
* header: {
* size, // of the range, in pages
* rem0, // number of free pages (4k)
* rem1, // number of free super pages (16k)
* rem2, // number of free mega pages (32k)
* rem3, // number of free ultra pages (64k)
* }
*
*
*/
/**
* enable PML4 4K paging
* identity maps all the addressable memory except the kernel
* executable
*
* map the kernel executable to high half: 0xffffffff80000000 + phys
*/
-void init_paging(void);
void init_paging(const struct stivale2_struct_tag_memmap* memmap);
// page table flags
// the entry is present
#define PRESENT_ENTRY 1
// read/write: set if the page is writable
#define PL_RW 2
// user/supervisor: set if the page is accessible from user mode
#define PL_US 4
// page level write through
#define PWT 8
// page level cache disable
#define PCD 16
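// execute disable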
#define PL_XD (1llu << 63)
/**
* map pages from a given physical address to a given virtual address
* map 'count' continuous pages
*
* flags can be a combination of the following:
* - PRESENT_ENTRY
* - PL_RW: writable
* - PL_US: user accessible (should not be used here)
* - PWT: page level write through
* - PCD: page level cache disable
*/
-void map_pages(uint64_t physical_addr, uint64_t virtual_addr, size_t count);
void map_pages(uint64_t physical_addr,
uint64_t virtual_addr,
size_t count,
unsigned flags);
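For illustration only, here is how the new four-argument map_pages() might be called once paging is initialized. The flag macros and the prototype come from this header; the helper name, the MMIO physical address and the page count are hypothetical.

#include <stddef.h>
#include <stdint.h>
#include "paging.h"

// hypothetical helper: map a 4-page MMIO window as present, writable
// and cache-disabled, at MMIO_BEGIN | phys (see vmap.h)
static void map_some_mmio(uint64_t mmio_phys) {
    map_pages(mmio_phys,
              0xffffffff00000000llu | mmio_phys, // MMIO_BEGIN | phys
              4,                                 // number of 4K pages
              PRESENT_ENTRY | PL_RW | PCD);
}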

kernel/memory/physical_allocator.c (61 lines changed)

@@ -8,6 +8,8 @@
#include "../debug/assert.h"
#include "../debug/panic.h"
#include "physical_allocator.h"
#include "vmap.h"
/**
* memory modeling:
* | MR1 | MR2 | //// | MR3 | ....
@@ -48,6 +50,7 @@
// drop the segments that are too small
#define MIN_SIZE 10
//linked list element representing a 64 MB memory region
struct memory_range {
void* base;
@@ -69,7 +72,6 @@ struct MR_header {
uint8_t bitmap_level0[2048];
};
// number of ranges considered
static_assert_equals(sizeof(struct MR_header), 4096);
@@ -78,6 +80,7 @@ static_assert_equals(sizeof(struct MR_header), 4096);
// this buffer is sorted by base address
static struct memory_range memory_ranges_buffer[512];
// number of ranges considered
static unsigned n_ranges = 0;
static unsigned total_available_pages = 0;
@@ -87,13 +90,28 @@ static struct memory_range* mr_lists[4] = {0};
// init memory range as a free range, ready to be allocated
// it should only be called with early memory configuration:
// identity mapped stuff
static void init_memory_range(struct memory_range* range, uint64_t addr, size_t length) {
assert_aligned(addr, 0x1000);
// init the linked list's structure
range->base = (void *)addr;
range->length = length-1;
// init the MR header
struct MR_header* header = (struct MR_header *)addr;
kprintf("%x, %x\n", header, length);
// zero all the bit maps
memset(((uint8_t*)header), 0, 0x1000 * (length));
kprintf("issse\n");
// we use one page per region for the header
header->available[0] = (length-1);
@@ -115,8 +133,7 @@ static void init_memory_range(struct memory_range* range, uint64_t addr, size_t
}
// zero all the bit maps
memset(((uint8_t*)header)+1152, 0, 2048+512+256+128);
}
@@ -131,13 +148,22 @@ void init_physical_allocator(const struct stivale2_struct_tag_memmap* memmap) {
// dont take kernel & modules or acpi reclaimable
// memory ranges in account
-if(e.type == STIVALE2_MMAP_USABLE ||
-   e.type == STIVALE2_MMAP_BOOTLOADER_RECLAIMABLE) {
if(e.type == STIVALE2_MMAP_USABLE) {
-// ceil the size to the number of pages
-size_t size = (e.length + 4096 - 1) / 4096;
uint64_t base = e.base;
size_t size = e.length;
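// align the base up to the next page boundary, shrinking the size accordingly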
if(base & 0x0fff) {
uint64_t new_base = (base & ~0x0fffllu) + 0x1000;
size -= new_base - base;
base = new_base;
}
// floor the size
size /= 4096;
while(size > 0) {
@@ -244,6 +270,11 @@ static void* select_bitmap(struct MR_header* header,
}
}
// return a pointer to the MR header
static struct MR_header* get_header_base(const struct memory_range* range) {
return range->base;
}
// modifies the bitmaps to allocate
// the ith page in the 64 MB segment
static void alloc_page_bitmaps(struct MR_header* header, unsigned page) {
@@ -385,7 +416,7 @@ void physalloc(size_t size, void* virtual_addr, PHYSALLOC_CALLBACK callback) {
const unsigned memory_range_length = range->length;
-struct MR_header* header = range->base;
struct MR_header* header = get_header_base(range);
// use the bitmap that maximizes the granularity,
// in accordance with the getMR() result
@@ -465,6 +496,7 @@ void physalloc(size_t size, void* virtual_addr, PHYSALLOC_CALLBACK callback) {
for(unsigned j = 0; j < granularity; j++) {
void* target_address = range->base + (curr_page+1) * 0x1000;
callback((uint64_t)target_address, (uint64_t)virtual_addr, 0x1000);
alloc_page_bitmaps(header, curr_page);
@@ -555,7 +587,20 @@ void physfree(void* physical_page_addr) {
unsigned position = ((uint64_t)physical_page_addr - (uint64_t)range->base) / 0x1000;
-free_page_bitmaps((struct MR_header *)range->base, position);
free_page_bitmaps(get_header_base(range), position);
memset(translate_address(physical_page_addr), 0, 0x1000);
total_available_pages++;
}
static_assert_equals(
sizeof(struct physical_allocator_data_page_entry),
sizeof(struct memory_range));
const struct physical_allocator_data_page_entry*
physical_allocator_data_pages(size_t* size) {
*size = n_ranges;
return (struct physical_allocator_data_page_entry *)memory_ranges_buffer;
}
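As a sanity check on the MR layout documented in paging.h (a 1152-byte header followed by four bitmaps, all inside the one 4 KiB header page of a 64 MB range), the arithmetic below confirms that each bitmap size matches its granularity. The granularities come from the header comment; the check itself is a standalone sketch, not part of the commit.

#include <assert.h>

#define MR_BYTES     (64u * 1024 * 1024)  // one memory range: 64 MB
#define PAGE         4096u
#define PAGES_PER_MR (MR_BYTES / PAGE)    // 16384 pages

int main(void) {
    assert(PAGES_PER_MR / 8 == 2048);          // bitmap0: 1 bit per 4K page
    assert(PAGES_PER_MR / 4 / 8 == 512);       // bitmap1: 1 bit per 16K super page
    assert(PAGES_PER_MR / 8 / 8 == 256);       // bitmap2: 1 bit per 32K mega page
    assert(PAGES_PER_MR / 16 / 8 == 128);      // bitmap3: 1 bit per 64K ultra page
    assert(1152 + 2048 + 512 + 256 + 128 == PAGE); // header page is exactly full
    return 0;
}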

kernel/memory/physical_allocator.h (33 lines changed)

@@ -6,12 +6,41 @@
struct stivale2_struct_tag_memmap;
// init the physical allocator with the stivale2 memory map
// first phase of initialization:
// assumes that all the addressable memory is identity mapped
void init_physical_allocator(const struct stivale2_struct_tag_memmap* memmap);
//linked list element representing a 64 MB memory region
struct physical_allocator_data_page_entry {
void* physical_address;
-typedef void (*PHYSALLOC_CALLBACK)(uint64_t physical_address, uint64_t virtual_address, size_t size);
uint64_t reserved[2];
};
// the physalloc / physfree functions require these
// pages of every range to be mapped according to the
// vmap.h specifications:
//
// physical_address should be mapped to
// physical_address | TRANSLATED_PHYSICAL_MEMORY_BEGIN
// (0xffff800000000000, see translate_address() in vmap.h)
//
// size: the number of entries in the returned array
const struct physical_allocator_data_page_entry*
physical_allocator_data_pages(size_t* size);
typedef void (*PHYSALLOC_CALLBACK)(
uint64_t physical_address,
uint64_t virtual_address,
size_t size);
void physalloc(size_t size, void* virtual_addr, PHYSALLOC_CALLBACK callback);
void physfree(void* physical_page_addr);
// return the number of available pages in the system
int available_pages(void);
// return the total number of pages in the system
int total_pages(void);
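A minimal sketch of how the callback interface might be used, assuming only what this header declares: physalloc() reports each allocated page through the callback, together with the virtual address that was passed in. The callback body, the counter and the page count are hypothetical.

#include <stddef.h>
#include <stdint.h>
#include "physical_allocator.h"

static size_t pages_received = 0;

// illustrative callback: just count the pages handed back by physalloc()
static void count_pages(uint64_t physical_address,
                        uint64_t virtual_address,
                        size_t size) {
    (void) physical_address;
    (void) virtual_address;
    (void) size;        // one page per call in the current allocator
    pages_received++;
}

// hypothetical use: grab 16 physical pages, with no particular virtual target
static void example(void) {
    physalloc(16, NULL, count_pages);
}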

kernel/memory/vmap.h (93 lines changed)

@@ -0,0 +1,93 @@
#pragma once
#include <stdint.h>
/**
* general virtual memory map for Bincows:
*
* 0 ------------------
* | USER |
* | MEMORY |
* 0x0000007fffffffff |----------------|
* | |
* 0xffff800000000000 |----------------|
* | TRANSLATED |
* | MEMORY |
* 0xffff807fffffffff |----------------|
* | |
* 0xffffffff00000000 |----------------|
* | MMIO |
* 0xffffffff80000000 |----------------|
* | KERNEL |
* | DATA |
* 0xffffffffffffffff ------------------
*
*
* the allocator and the page table manager
* share the same virtual space, in a translated
* physical memory space
*
*
*
*/
/**
* early virtual memory map for Bincows:
*
*
* 0 ------------------
* | |
* | |
* 0xffff800000000000 |----------------|
* | TRANSLATED |
* | MEMORY |
* 0xffff8c0000000000 |----------------|
* | |
* 0xffffffff80000000 |----------------|
* | KERNEL |
* | DATA |
* 0xffffffffffffffff ------------------
*
*
*/
#define TRANSLATED_PHYSICAL_MEMORY_BEGIN 0xffff800000000000llu
#define MMIO_BEGIN 0xffffffff00000000llu
#define KERNEL_DATA_BEGIN 0xffffffff80000000llu
#define PAGE_TABLES_BEGIN 0xffffffffc0000000llu
// return a non-zero value iff the given address
// resides in user memory
static inline int is_user(uint64_t virtual_address) {
// user is in the lower half
return ! (virtual_address & (1llu << 63));
}
static inline int is_allocator_memory(uint64_t virtual_address) {
// check the top bits of the address
return (virtual_address & 0xff80000000000000) == 0xff00000000000000;
}
static inline int is_kernel_memory(uint64_t virtual_address) {
// kernel is in the higher 2 GB
return (virtual_address & KERNEL_DATA_BEGIN) == KERNEL_DATA_BEGIN;
}
static inline int is_mmio(uint64_t virtual_address) {
// between -4 GB and -2 GB
return (virtual_address & KERNEL_DATA_BEGIN) == MMIO_BEGIN;
}
// return the physical address of a
// stivale2 high half pointer
static inline uint64_t early_virtual_to_physical(
const void* virtual_address) {
return ~KERNEL_DATA_BEGIN & (uint64_t)virtual_address;
}
// translate a physical memory address
// to access it where it is mapped
static inline void* translate_address(void* phys_addr) {
return (void*)((uint64_t)phys_addr | TRANSLATED_PHYSICAL_MEMORY_BEGIN);
}
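Since the helpers at the bottom of vmap.h are plain bit operations, they can be exercised in a standalone sketch. The two window macros are copied from above; the physical addresses are arbitrary examples.

#include <assert.h>
#include <stdint.h>

#define TRANSLATED_PHYSICAL_MEMORY_BEGIN 0xffff800000000000llu
#define KERNEL_DATA_BEGIN                0xffffffff80000000llu

int main(void) {
    // early_virtual_to_physical: a kernel symbol at KERNEL_DATA_BEGIN | phys
    // gives back its physical address once the high-half bits are stripped
    uint64_t kernel_virt = KERNEL_DATA_BEGIN | 0x145000llu;
    assert((~KERNEL_DATA_BEGIN & kernel_virt) == 0x145000llu);

    // translate_address: any physical address is reachable through the
    // translated-memory window once init_paging() has run
    uint64_t phys = 0x7f000llu;
    assert((phys | TRANSLATED_PHYSICAL_MEMORY_BEGIN) == 0xffff80000007f000llu);
    return 0;
}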