Mathieu Serandour committed 1 year ago
commit 181ddaed57
1. Makefile (4 changes)
2. core (binary)
3. kernel/drivers/pcie.c (48 changes)
4. kernel/memory/kalloc.c (5 changes)
5. kernel/memory/paging.c (101 changes)
6. kernel/memory/paging.h (2 changes)
7. kernel/memory/vmap.h (2 changes)
8. kernel/video/terminal.h (1 change)

Makefile (4 changes)

@@ -16,12 +16,14 @@ QEMU_ARGS := -monitor stdio \
     -bios /usr/share/ovmf/OVMF.fd \
     -m 8192 \
     -M q35 \
+    -usb \
+    -device usb-host \
     -vga virtio \
     -no-reboot \
     -D qemu.log \
     -drive format=raw,file=
-QEMU_DEBUG_ARGS:= -no-shutdown -d int -s -S $(QEMU_ARGS)
+QEMU_DEBUG_ARGS:= -no-shutdown -d int $(QEMU_ARGS)
 run: all
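Note: the second QEMU_DEBUG_ARGS assignment overrides the first, so the debug target now runs without -s -S. In QEMU, -s is shorthand for -gdb tcp::1234 and -S halts the CPU at startup until a debugger attaches; with the overridden variant a session would look like this (kernel.elf stands in for whatever ELF this Makefile actually builds):

    $ gdb kernel.elf
    (gdb) target remote :1234
    (gdb) continue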

core (binary)

Binary file not shown.

kernel/drivers/pcie.c (48 changes)

@@ -111,11 +111,13 @@ static void scan_devices(void) {
     unsigned func,
     struct PCIE_configuration_space* config_space
 ) {
     current->next = kmalloc(
         sizeof(struct device_desc_node)
     );
+
+    klog_debug("device %u:%u - %u", bus, device, func);
     current = current->next;
     current->next = NULL;
@@ -136,15 +138,17 @@ static void scan_devices(void) {
             // the device
             if(!check_function(bus, device, 0))
                 continue;
-            for(unsigned func = 0; func < 8; func++)
+            insert(bus, device, 0, get_config_space_base(bus, device, 0));
+
+            for(unsigned func = 1; func < 8; func++)
                 if(check_function(bus, device, func))
                     insert(bus, device, func, get_config_space_base(bus, device, func));
         }
     }
     // now create the final array
     installed_devices = kmalloc(
         n_installed_devices
@@ -165,7 +169,27 @@ static void scan_devices(void) {
         kfree(device);
         device = next;
     }
 }
+
+// identity map every page that might be a config space
+static void identity_map_possible_config_spaces(void) {
+    for(unsigned i = 0; i < pcie_descriptor.size; i++) {
+        // identity map the corresponding pages
+        map_pages(
+            (uint64_t)pcie_descriptor.array[i].address, // phys
+            (uint64_t)pcie_descriptor.array[i].address, // virt
+            256 * // busses
+             32 * // devices
+              8,  // functions
+            PRESENT_ENTRY | PCD | PL_XD
+            // no cache, execute disable
+        );
+    }
+}
+
+static void identity_unmap_possible_config_spaces(void) {
+    for(unsigned i = 0; i < pcie_descriptor.size; i++)
+        ; // unmap_pages((uint64_t)pcie_descriptor.array[i].address, 256 * 32 * 8);
+}
@@ -178,28 +202,20 @@
 /**
     klog_debug("init pcie...");

     // calculate the highest bus number
     for(unsigned i = 0; i < pcie_descriptor.size; i++) {
+        // map the corresponding pages
         if(pcie_descriptor.array[i].end_bus > max_bus)
             max_bus = pcie_descriptor.array[i].end_bus;
-
-        klog_debug("map %lx", pcie_descriptor.array[i].address);
-        // identity map the corresponding pages
-        map_pages(
-            pcie_descriptor.array[i].address, // phys
-            pcie_descriptor.array[i].address, // virt
-            256 * // busses
-             32 * // devices
-              8 , // functions
-            PRESENT_ENTRY | PCD | PL_XD
-            // no cache, execute disable
-        );
     }
-
-    asm("hlt");
-    scan_devices();
+    identity_map_possible_config_spaces();
+    scan_devices();
+    identity_unmap_possible_config_spaces();
+
+    klog_info("found %u PCI Express devices", n_installed_devices);
 }
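The page count used by identity_map_possible_config_spaces is the full ECAM window of one PCIe segment: one 4 KiB configuration page per function, and 256 busses * 32 devices * 8 functions = 65,536 pages, i.e. 256 MiB per entry of pcie_descriptor. For reference, a sketch of the standard ECAM address layout this mapping assumes (ecam_address is a hypothetical helper, not part of this commit):

    #include <stdint.h>

    // ECAM: the config space of (bus, device, function) lives at
    // base + (bus << 20) + (device << 15) + (function << 12)
    static inline uintptr_t ecam_address(uintptr_t base, unsigned bus,
                                         unsigned dev, unsigned func) {
        return base + ((uintptr_t)bus  << 20)
                    + ((uintptr_t)dev  << 15)
                    + ((uintptr_t)func << 12);
    }

Mapping the window right before scan_devices() and unmapping it right after keeps the identity mapping temporary, for the duration of enumeration only.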

kernel/memory/kalloc.c (5 changes)

@@ -80,12 +80,13 @@ static void expand_heap(size_t size) {
     size_t new_kheap_pages_size = (kheap_size + size + 0xfff) >> 12;
     size_t old_kheap_pages_size = (kheap_size + 0xfff) >> 12;

     // alloc extra pages if needed
     if(new_kheap_pages_size != old_kheap_pages_size) {
         alloc_pages(
             kheap_begin + (old_kheap_pages_size << 12),
             new_kheap_pages_size - old_kheap_pages_size,
-            PRESENT_ENTRY// | PL_XD // execute disable pages
+            PRESENT_ENTRY | PL_XD // execute disable pages
         );
     }
@@ -245,7 +246,9 @@ static seg_header* split_segment(seg_header* pred, seg_header* tosplit, size_t s
 void kheap_init(void) {
     klog_debug("init kernel heap...");

     expand_heap(MIN_EXPAND_SIZE);
 }
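Two notes on this file: re-enabling PL_XD makes freshly allocated heap pages execute-disable, which only has an effect once EFER.NXE is set (paging.c defines IA32_EFER_NXE_BIT, bit 11 of the IA32_EFER MSR, for exactly that). And the page-count arithmetic in expand_heap rounds byte sizes up to whole 4 KiB pages: growing, say, a 0x2800-byte heap by 0x1000 bytes gives (0x2800 + 0xfff) >> 12 = 3 old pages and (0x3800 + 0xfff) >> 12 = 4 new pages, so exactly one page is allocated.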

kernel/memory/paging.c (101 changes)

@@ -17,11 +17,12 @@
 #define IA32_EFER_NXE_BIT (1lu << 11)

 // size of bulks of allocation
-// the page buffer is 64 long
-// so we suppose that 4096 pages
-// won't make more than 64 page
-// tables
-#define MAX_ALLOC 1024
+// the page buffer is 16 long
+// in the worst case scenario,
+// 8192 page allocs
+// -> 1 pdpt + 1 pd + 9 pt
+// = 11 newpages
+#define MAX_ALLOC 4096
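The new worst-case comment can be checked by hand: one bulk of MAX_ALLOC = 4096 pages covers 4096 / 512 = 8 page tables, plus one extra when the range is not 2 MiB aligned, hence 9 pt; with at most one new pd and one new pdpt on top, that is the 11 new table pages the comment counts, which fits the 16-entry page buffer. (The figure "8192 page allocs" looks like it should read 4096, matching the define, but that is a guess.)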
/**
@@ -244,7 +245,6 @@ static void map_kernel(const struct stivale2_struct_tag_memmap* memmap) {
             break;
         default:
             /* data */
-            flags |= PL_XD;
             break;
     }

     //alloc the page table pages
@@ -426,6 +426,7 @@ static void* get_entry_or_allocate(void** restrict table, unsigned index) {
     void* entry = virtual_addr_table[index];

     if(!present_entry(entry)) {
         void* e = create_table_entry(
             alloc_page_table(),
             PRESENT_ENTRY);
@@ -435,6 +436,19 @@ static void* get_entry_or_allocate(void** restrict table, unsigned index) {
     return entry;
 }

+// kernel panic if the entry is not present
+static void* get_entry_or_panic(void** restrict table, unsigned index) {
+    assert(index < 512);
+
+    void** virtual_addr_table = translate_address(table);
+    void* entry = virtual_addr_table[index];
+
+    assert(present_entry(entry));
+
+    return entry;
+}
+
 /**
  * this function cannot be called in a callback
  * because it would lead to recursion.
@@ -447,6 +461,7 @@ static void internal_map_pages(uint64_t physical_addr,
                                uint64_t flags) {
     while(count > 0) {
+        // fetch table indexes
         unsigned pml4i = pml4_offset(virtual_addr),
                  pdpti = pdpt_offset(virtual_addr),
@@ -499,19 +514,16 @@ void alloc_pages(void* virtual_addr_begin,
     // don't allow recursion
     alloc_page_table_realloc = 0;

     void callback(
         uint64_t physical_address,
         uint64_t virtual_address,
         size_t c) {
         internal_map_pages(physical_address,
                            virtual_address,
                            c,
                            flags);
     };

     while(count > 0) {
         unsigned size = count;

         if(size > MAX_ALLOC)
@@ -544,4 +556,73 @@ void map_pages(uint64_t physical_addr,
     // count <= 64
     fill_page_table_allocator_buffer(64);
     internal_map_pages(physical_addr, virtual_addr, count, flags);
 }
+
+// return 1 if any of the page entries is present
+int is_range_unmapped(pte* page_table, unsigned begin, unsigned end) {
+    pte* translated = translate_address(page_table);
+
+    for(int i = begin; i < end; i++) {
+        if(present_entry(translated[i]))
+            return 1;
+    }
+    return 0;
+}
+
+void unmap_pages(uint64_t virtual_addr, size_t count) {
+    while(count > 0) {
+        // fetch table indexes
+        unsigned pml4i = pml4_offset(virtual_addr),
+                 pdpti = pdpt_offset(virtual_addr),
+                 pdi   = pd_offset(virtual_addr),
+                 pti   = pt_offset(virtual_addr);
+
+        assert(pml4i == 0 || pml4i == 511 || pml4i == 256);
+
+        // those entries should exist
+        pml4e restrict pml4entry = extract_pointer(get_entry_or_panic((void**)pml4, pml4i));
+        pdpte restrict pdptentry = extract_pointer(get_entry_or_panic((void**)pml4entry, pdpti));
+        pde  restrict pdentry    = extract_pointer(get_entry_or_panic((void**)pdptentry, pdi));
+
+        // keep track of the first & last element
+        // to unmap; we are sure that in this range
+        // everything is unmapped
+        unsigned begin = pti;
+
+        while(count > 0 && pti < 512) {
+            void** entry_ptr = (void**)translate_address(pdentry) + pti;
+
+            if(!present_entry(*entry_ptr)) {
+                char buff[256];
+                sprintf(buff,
+                    "unmap_pages(...):\n"
+                    " tried to unmap not mapped virtual memory 0x%lx",
+                    virtual_addr);
+                panic(buff);
+            }
+
+            pti++;
+            count--;
+            virtual_addr += 0x1000;
+        }
+        unsigned end = pti;
+
+        // unmap the page map if empty
+        if(is_range_unmapped(pdentry, 0, begin))
+            continue;
+        if(is_range_unmapped(pdentry, end, 512))
+            continue;
+
+        // the page table contains no entry
+        // let's free it
+        physfree(pdentry);
+    }
+}
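unmap_pages is the inverse of map_pages at the same 4 KiB granularity; the commented-out call in pcie.c would release one config-space window like this (sketch mirroring that call):

    // drop the identity mapping of segment i's ECAM window
    unmap_pages((uint64_t)pcie_descriptor.array[i].address, 256 * 32 * 8);

Note the free-if-empty logic at the bottom: is_range_unmapped(pdentry, 0, begin) and is_range_unmapped(pdentry, end, 512) return 1 as soon as any entry outside the just-processed [begin, end) run is still present, and the continue statements then skip physfree(pdentry), so a page table is only released once none of its 512 entries is present.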

kernel/memory/paging.h (2 changes)

@@ -46,6 +46,8 @@ void map_pages(uint64_t physical_addr,
                size_t count,
                uint64_t flags);

+void unmap_pages(uint64_t virtual_addr, size_t count);
+
 void alloc_pages(void* virtual_addr,
                size_t count,
                uint64_t flags);

kernel/memory/vmap.h (2 changes)

@@ -111,6 +111,6 @@ static inline uint64_t early_virtual_to_physical(
 // translate a physical memory address
 // to access it where it is mapped
-static inline void* translate_address(void* phys_addr) {
+static inline void* __attribute__((pure)) translate_address(void* phys_addr) {
     return (void*)((uint64_t)phys_addr | TRANSLATED_PHYSICAL_MEMORY_BEGIN);
 }
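__attribute__((pure)) declares to GCC that translate_address has no side effects and that its result depends only on its argument (and on global memory), so the compiler may merge repeated calls. The page-table code above calls it several times per loop iteration; with the attribute, a pair like

    void** a = translate_address(table);
    void** b = translate_address(table); // may be folded into the first call

can be reduced to a single computation.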

kernel/video/terminal.h (1 change)

@@ -38,6 +38,7 @@ void terminal_clear(void);
 // change the default terminal handler,
 // which is an empty function
+// h = NULL will make a safe empty handler
 void set_terminal_handler(terminal_handler_t h);
