Mathieu Serandour committed a9a2544078 · 1 year ago
  1. kernel/linker.ld (2)
  2. kernel/memory/gdt.s (6)
  3. kernel/memory/paging.c (95)
  4. kernel/memory/paging.h (57)
  5. kernel/memory/physical_allocator.c (42)

2
kernel/linker.ld

@@ -22,6 +22,8 @@ SECTIONS
/* Since we are going to use PIE, this is just the base load address, but the */
/* bootloader will be able to relocate us as it sees fit. */
. = 0xffffffff80200000;
_kernel_base_address = .;
.text : {
*(.text*)
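The new _kernel_base_address symbol is defined by the linker, so only its address is meaningful, not its contents. A minimal sketch of how C code could read it; the kernel_base() accessor is hypothetical and not part of this commit:

#include <stdint.h>

extern char _kernel_base_address[];   // linker-defined symbol: address only, no storage

static inline uint64_t kernel_base(void) {
    // 0xffffffff80200000 unless the bootloader relocates the PIE kernel
    return (uint64_t)_kernel_base_address;
}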

6
kernel/memory/gdt.s

@@ -25,10 +25,4 @@ far_ret:
mov es, ax
mov fs, ax
mov gs, ax
ret
_cr3:
mov rax, rdi
mov cr3, rax
ret
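paging.c below declares get_cr0/set_cr0/get_cr4/set_cr4 as extern, but their definitions are not shown in this commit. A minimal sketch of what they could look like, written here as GCC-style inline assembly in C rather than alongside _cr3 in gdt.s; this is an assumption, not the author's actual implementation:

#include <stdint.h>

uint64_t get_cr0(void) {
    uint64_t cr0;
    __asm__ volatile("mov %%cr0, %0" : "=r"(cr0));
    return cr0;
}

void set_cr0(uint64_t cr0) {
    __asm__ volatile("mov %0, %%cr0" : : "r"(cr0) : "memory");
}

uint64_t get_cr4(void) {
    uint64_t cr4;
    __asm__ volatile("mov %%cr4, %0" : "=r"(cr4));
    return cr4;
}

void set_cr4(uint64_t cr4) {
    __asm__ volatile("mov %0, %%cr4" : : "r"(cr4) : "memory");
}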

95
kernel/memory/paging.c

@@ -0,0 +1,95 @@
#include "paging.h"
#include "../debug/assert.h"
#include <stivale2.h>
extern uint64_t get_cr0(void);
extern void set_cr0(uint64_t cr0);
extern void _cr3(uint64_t cr3);
extern uint64_t get_cr4(void);
extern void set_cr4(uint64_t cr4);
#define CR0_PG_BIT (1lu << 31)
#define CR4_PAE_BIT (1lu << 5)
/**
* 4th level table (pt) entry
*/
typedef const void* pte;
/**
* 3rd level table (pd) entry
*/
typedef const pte* pde;
/**
* 2nd level table (pdpt) entry
*
*/
typedef const pde* pdpte;
/**
* 1st level table (pml4) entry
*/
typedef const pdpte* pml4e;
pml4e pml4_table[512] __attribute__((aligned(4096))) = {0};
/*
struct stivale2_struct_tag_memmap {
struct stivale2_tag tag; // Identifier: 0x2187f79e8612de07
uint64_t entries; // Count of memory map entries
struct stivale2_mmap_entry memmap[]; // Array of memory map entries
};
struct stivale2_mmap_entry {
uint64_t base; // Physical address of base of the memory section
uint64_t length; // Length of the section
uint32_t type; // Type (described below)
uint32_t unused;
};
enum stivale2_mmap_type : uint32_t {
USABLE = 1,
RESERVED = 2,
ACPI_RECLAIMABLE = 3,
ACPI_NVS = 4,
BAD_MEMORY = 5,
BOOTLOADER_RECLAIMABLE = 0x1000,
KERNEL_AND_MODULES = 0x1001,
FRAMEBUFFER = 0x1002
};
*/
void init_paging(const struct stivale2_struct_tag_memmap* memory_map) {
    // dump the bootloader-provided memory map
    for(unsigned i = 0; i < memory_map->entries; i++) {
        const struct stivale2_mmap_entry* e = &memory_map->memmap[i];
        kprintf("%016lx - %016lx - %u\n", e->base, e->base + e->length, e->type);
    }

    // get the physical address of the pml4 table:
    // mask off the higher-half base to recover the physical address
    uint64_t lower_half_ptr = (uint64_t)&pml4_table & ~0xffffffff80000000llu;

    // Page level Write Through (PWT)  = 0
    // Page level Cache Disable (PCD)  = 0
    // bits [63:MAXPHYADDR] must be 0; since 'lower_half_ptr' is already a
    // physical address, this should hold
    _cr3(lower_half_ptr);

    // enable PAE in cr4
    set_cr4(get_cr4() | CR4_PAE_BIT);

    // enable the PG bit in cr0
    set_cr0(get_cr0() | CR0_PG_BIT);
}

void map_pages(uint64_t physical_addr, uint64_t virtual_addr, size_t count) {
    // not implemented yet; silence unused-parameter warnings
    (void) (physical_addr + virtual_addr + count);
}
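For context, a sketch of how init_paging() could be reached from the kernel entry point: walk the stivale2 tag list for the memory map tag (identifier 0x2187f79e8612de07, as quoted in the comment above). The stivale2_get_tag() helper and the kernel_main() name are assumptions; only STIVALE2_STRUCT_TAG_MEMMAP_ID comes from stivale2.h.

#include <stivale2.h>
#include <stdint.h>
#include <stddef.h>
#include "paging.h"

// walk the bootloader's tag list looking for a given identifier
static void* stivale2_get_tag(struct stivale2_struct* hdr, uint64_t id) {
    struct stivale2_tag* tag = (struct stivale2_tag*)(uintptr_t)hdr->tags;
    while (tag != NULL) {
        if (tag->identifier == id)
            return tag;
        tag = (struct stivale2_tag*)(uintptr_t)tag->next;
    }
    return NULL;
}

void kernel_main(struct stivale2_struct* hdr) {
    const struct stivale2_struct_tag_memmap* memmap =
        stivale2_get_tag(hdr, STIVALE2_STRUCT_TAG_MEMMAP_ID);

    if (memmap != NULL)
        init_paging(memmap);
}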

57
kernel/memory/paging.h

@@ -0,0 +1,57 @@
#pragma once
#include <stddef.h>
#include <stdint.h>
struct stivale2_struct_tag_memmap;
//11111111111
//00100010001
//11011101110
/**
* memory modeling:
* | MR1 | MR2 | //// | MR3 | ....
* MR = 64 MB memory range
*
* MR linked list entry: {
* MR_base, // base address of the MR
* MR_length, // length, in pages
* MR_next, // the next MR in the list, or null
* }
*
* 4 global linked lists:
* memory_list[i] = MR linked list entry *
* ML0: at least 1 64K contiguous available range (very fast allocation)
* ML1: at least 1 32K contiguous available range (fast allocation)
* ML2: at least 1 16K contiguous available range (medium allocation)
* ML3: at least 1 4K contiguous available range (slow allocation)
*
*
* MR layout:
* | ----------------------------- 4K HEADER --------------------------- | BLOCKS |
* | 1152 header | 128 bitmap3 | 256 bitmap2 | 512 bitmap1 | 2K bitmap0 | 64 MB - 4K pages |
*
* header: {
* size, // of the range, in pages
* rem0, // number of free pages (4k)
* rem1, // number of free super pages (16k)
* rem2, // number of free mega pages (32k)
* rem3, // number of free ultra pages (64k)
* }
*
*
*/
/**
* enable PML4 4K paging
*
*/
void init_paging(const struct stivale2_struct_tag_memmap *);
/**
* map pages from a given physical address to a given virtual address
* map 'count' contiguous pages
*/
void map_pages(uint64_t physical_addr, uint64_t virtual_addr, size_t count);
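map_pages() is only a stub in this commit. For reference, a small sketch of the standard x86-64 index arithmetic that a 4-level, 4K-page implementation would need; these helpers are not part of the commit:

#include <stdint.h>

// bits 47:39 -> pml4 index, 38:30 -> pdpt index,
// bits 29:21 -> pd index,   20:12 -> pt index, 11:0 -> offset in the 4K page
static inline unsigned pml4_index(uint64_t vaddr) { return (vaddr >> 39) & 0x1ff; }
static inline unsigned pdpt_index(uint64_t vaddr) { return (vaddr >> 30) & 0x1ff; }
static inline unsigned pd_index(uint64_t vaddr)   { return (vaddr >> 21) & 0x1ff; }
static inline unsigned pt_index(uint64_t vaddr)   { return (vaddr >> 12) & 0x1ff; }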

42
kernel/memory/physical_allocator.c

@@ -0,0 +1,42 @@
#include <stddef.h>

/**
* memory modeling:
* | MR1 | MR2 | //// | MR3 | ....
* MR = 64 MB memory range
*
* MR linked list entry: {
* MR_base, // base address of the MR
* MR_length, // length, in pages
* MR_next, // the next MR in the list, or null
* }
*
* 4 global linked lists:
* memory_list[i] = MR linked list entry *
* ML0: at least 1 64K contiguous available range (very fast allocation)
* ML1: at least 1 32K contiguous available range (fast allocation)
* ML2: at least 1 16K contiguous available range (medium allocation)
* ML3: at least 1 4K contiguous available range (slow allocation)
*
*
* MR layout:
* | ----------------------------- 4K HEADER --------------------------- | BLOCKS |
* | 1152 header | 128 bitmap3 | 256 bitmap2 | 512 bitmap1 | 2K bitmap0 | 64 MB - 4K pages |
*
* header: {
* size, // of the range, in pages
* rem0, // number of free pages (4k)
* rem1, // number of free super pages (16k)
* rem2, // number of free mega pages (32k)
* rem3, // number of free ultra pages (64k)
* }
*
*
*/
/**
* @brief allocate physical memory
*
* @param size requested allocation size
* @return void* the allocated range
*/
void* physalloc(size_t size);
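A sketch of the structures described in the comment above. Field names and exact integer types are assumptions; the bitmap sizes come directly from the stated 4K header layout (1152-byte header, then 128/256/512/2048-byte bitmaps for 64K/32K/16K/4K granularity over a 64 MB range):

#include <stdint.h>
#include <stddef.h>

// MR linked list entry (one per 64 MB memory range)
struct memory_range {
    void*                base;    // MR_base:   physical base of the range
    size_t               length;  // MR_length: length, in 4K pages
    struct memory_range* next;    // MR_next:   next MR in the list, or NULL
};

// the 4 global lists: ML0 = 64K, ML1 = 32K, ML2 = 16K, ML3 = 4K granularity
static struct memory_range* memory_list[4];

// the 4K header at the start of each MR
struct mr_header {
    uint32_t size;  // size of the range, in pages
    uint32_t rem0;  // number of free pages       (4k)
    uint32_t rem1;  // number of free super pages (16k)
    uint32_t rem2;  // number of free mega pages  (32k)
    uint32_t rem3;  // number of free ultra pages (64k)
    uint8_t  reserved[1152 - 5 * sizeof(uint32_t)];
    uint8_t  bitmap3[128];   // one bit per 64K block (1024 blocks)
    uint8_t  bitmap2[256];   // one bit per 32K block (2048 blocks)
    uint8_t  bitmap1[512];   // one bit per 16K block (4096 blocks)
    uint8_t  bitmap0[2048];  // one bit per 4K page   (16384 pages)
};

_Static_assert(sizeof(struct mr_header) == 4096, "MR header must fill exactly one page");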