
fixed page level writable flag

commit 052c4b02b6 (branch: master)
Mathieu Serandour, 1 year ago
7 changed files:

  kernel/acpi/acpi.c          |  4
  kernel/drivers/nvme/nvme.c  | 11
  kernel/drivers/nvme/queue.c |  4
  kernel/drivers/pcie/scan.c  |  6
  kernel/lib/elf/elf.c        |  8
  kernel/memory/heap.c        |  2
  kernel/memory/paging.c      | 32

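Why every hunk adds PL_RW: this commit also sets CR0.WP (see kernel/memory/paging.c below). With WP clear, ring 0 ignores the R/W bit of page table entries, so kernel stores through read-only mappings silently succeed; with WP set, each of them faults. Every mapping the kernel writes through therefore needs the writable flag spelled out. For reference, the repo's flag macros wrap the architectural x86-64 page-entry bits; a sketch (the bit positions are ISA-defined, the exact macro values in this repo are assumptions):

    // x86-64 page-entry bits behind PRESENT_ENTRY, PL_RW, PL_US, PCD, PL_XD
    // (bit positions are architectural; the repo's macro values may differ)
    #define PTE_PRESENT (1ull << 0)   // entry is valid
    #define PTE_RW      (1ull << 1)   // writable; enforced for ring 0 once CR0.WP=1
    #define PTE_US      (1ull << 2)   // user-mode accessible
    #define PTE_PCD     (1ull << 4)   // page-level cache disable, for MMIO
    #define PTE_XD      (1ull << 63)  // execute disable; needs EFER.NXE=1
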
kernel/acpi/acpi.c | 4

@@ -96,9 +96,9 @@ void map_acpi_mmios(void) {
     // mmios to map: HPET, PCIE
     // cache disable
     map_pages((uint64_t)apic_config_base, APIC_VADDR, 1,
-              PRESENT_ENTRY | PCD | PL_XD);
+              PRESENT_ENTRY | PCD | PL_XD | PL_RW);
     map_pages((uint64_t)hpet_config_space, HPET_VADDR, 1,
-              PRESENT_ENTRY | PCD | PL_XD);
+              PRESENT_ENTRY | PCD | PL_XD | PL_RW);
 }

 static void parse_hpet(const struct HPET* table) {
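The APIC and HPET configuration spaces are device registers: uncached (PCD) and non-executable (PL_XD), but written by the kernel (APIC EOIs, HPET comparator setup), so they now need PL_RW as well. A minimal sketch of the store that would fault without it, assuming the HPET general configuration register at its architectural offset 0x10:

    // With CR0.WP=1 and no PL_RW on the HPET mapping, this ring-0
    // store raises a page fault instead of starting the counter.
    volatile uint64_t* hpet_cfg = (uint64_t*)(HPET_VADDR + 0x10);
    *hpet_cfg |= 1;  // ENABLE_CNF: start the HPET main counter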

kernel/drivers/nvme/nvme.c | 11

@@ -195,7 +195,7 @@ static uint64_t createPRP(void) {
         paddr,
         vaddr,
         1,
-        PRESENT_ENTRY | PL_XD | PCD
+        PRESENT_ENTRY | PL_XD | PCD | PL_RW
     );

     return paddr;

@@ -217,7 +217,7 @@ static void freePRP(uint64_t paddr) {
         paddr,
         vaddr,
         1,
-        PRESENT_ENTRY | PL_XD // cache enable
+        PRESENT_ENTRY | PL_XD | PL_RW // cache enable
     );

     physfree(paddr);

@@ -1106,13 +1106,6 @@ void nvme_async_read(struct driver* this,
     _sti();

     // the copy from prp to buf will occur
     // in the irq
-    /*
-    memcpy(
-        buf,
-        translate_address((void*)prp_paddr),
-        c << shift
-    );
-    */

     buf += c << shift;
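PRP (Physical Region Page) buffers are touched from two sides: the controller DMAs into them (DMA does not consult page tables), and the kernel memcpys command data in and completion data out. Only the CPU side needs PL_RW, but without it every such memcpy faults once WP is enforced. A sketch of the pattern createPRP() follows, assuming a physalloc() that pairs with the physfree() visible above (an assumed name):

    // One 4 KiB PRP page, uncached while the NVMe controller owns it.
    static uint64_t prp_create(uint64_t vaddr) {
        uint64_t paddr = physalloc();   // assumed pmm counterpart of physfree()
        map_pages(paddr, vaddr, 1,
                  PRESENT_ENTRY | PL_XD | PCD | PL_RW);  // RW for kernel memcpys
        return paddr;
    }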

kernel/drivers/nvme/queue.c | 4

@@ -29,7 +29,7 @@ struct queue create_queue(
         paddr,
         (uint64_t)vaddr,
         1,
-        PRESENT_ENTRY | PL_XD | PCD
+        PRESENT_ENTRY | PL_XD | PCD | PL_RW
     );

     return (struct queue) {

@@ -55,7 +55,7 @@ void free_queue(struct queue* q) {
         paddr,
         vaddr,
         1,
-        PRESENT_ENTRY | PL_XD
+        PRESENT_ENTRY | PL_XD | PL_RW
     );

     physfree(paddr);
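Same two-step pattern as the PRPs: create_queue maps the ring uncached, since the controller fetches submission entries from it by DMA, and free_queue re-maps it cache-enabled before returning the frame to the physical allocator. PL_RW is needed on both paths because submitting a command is a plain ring-0 store into the ring; a sketch with an assumed, heavily simplified entry layout:

    // Submission is a CPU store into the mapped ring (names assumed):
    struct nvme_sqe { uint8_t opcode; uint8_t fuse; uint16_t cid; };
    static void submit(volatile struct nvme_sqe* ring, uint16_t tail,
                       const struct nvme_sqe* cmd) {
        ring[tail] = *cmd;  // faults without PL_RW once CR0.WP=1
    }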

kernel/drivers/pcie/scan.c | 6

@@ -62,7 +62,7 @@ static void map_bar(uint64_t paddr,
         (size
          + ((uint64_t)vaddr & 0xfff) // take account
          + 0xfff) / 0x1000,          // of misalignment
-        PRESENT_ENTRY | PCD
+        PRESENT_ENTRY | PCD | PL_RW
     );
 }

@@ -330,8 +330,8 @@ static void map_possible_config_spaces(void) {
             256 * // busses
             32 *  // devices
             8,    // functions
-            PRESENT_ENTRY | PCD | PL_XD
-            // no cache, execute disable
+            PRESENT_ENTRY | PCD | PL_XD | PL_RW
+            // no cache, execute disable, writable
         );
     }
 }
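map_possible_config_spaces maps the whole ECAM window up front: 256 buses × 32 devices × 8 functions, one 4 KiB configuration page each, i.e. 65,536 pages or 256 MiB of uncached, execute-disabled, now writable virtual space. The per-function offset follows the standard ECAM layout:

    #include <stdint.h>

    // Standard PCIe ECAM: the config space of (bus, dev, fn) sits at a
    // fixed 4 KiB-aligned offset inside the window, so the whole window
    // is 256 * 32 * 8 = 65536 pages = 256 MiB.
    static inline uint64_t ecam_offset(uint8_t bus, uint8_t dev, uint8_t fn) {
        return ((uint64_t)bus << 20)          // 256 busses
             | ((uint64_t)(dev & 31) << 15)   // 32 devices per bus
             | ((uint64_t)(fn  &  7) << 12);  // 8 functions per device
    }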

kernel/lib/elf/elf.c | 8

@@ -108,7 +108,7 @@ elf_program_t* elf_load(const void* file, size_t file_size) {
         "prog->segs[j].flag=%lu \n",
         i,
         prog->segs[j].base,
-        prog->segs[j].length >> 12,
+        (prog->segs[j].length+0xfff) >> 12,
         prog->segs[j].flags
     );

@@ -136,7 +136,7 @@ elf_program_t* elf_load(const void* file, size_t file_size) {
     alloc_pages(
         prog->segs[j].base,
         page_count,
-        PRESENT_ENTRY
+        PRESENT_ENTRY | PL_US | PL_RW
     );

@@ -151,13 +151,13 @@ elf_program_t* elf_load(const void* file, size_t file_size) {
         file + phdr->p_offset,
         phdr->p_filesz
     );

+    /*
     remap_pages(
         prog->segs[j].base,
         page_count,
         0
     );
+    */
 }

 prog->n_segs = j;
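Two distinct fixes here. The log line now rounds the page count up: length >> 12 truncates, so a segment of, say, 0x1001 bytes would count as 1 page and its tail would go unaccounted; (length + 0xfff) >> 12 yields 2. And user segments are now mapped PL_US | PL_RW so ring 3 can actually read and write them. The rounding idiom:

    #include <stdint.h>

    // Round a byte length up to a count of 4 KiB pages.
    static inline uint64_t page_count(uint64_t len) {
        return (len + 0xfff) >> 12;   // e.g. 0x1001 -> 2 pages, not 1
    }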

kernel/memory/heap.c | 2

@@ -137,7 +137,7 @@ static void expand_heap(size_t size) {
     alloc_pages(
         heap_begin + (old_heap_pages_size << 12),
         new_heap_pages_size - old_heap_pages_size,
-        PRESENT_ENTRY | PL_XD // execute disable pages
+        PRESENT_ENTRY | PL_XD | PL_RW // execute disable pages
     );
 }

kernel/memory/paging.c | 32

@@ -11,6 +11,7 @@
 #include "../lib/registers.h"

+#define CR0_WP      (1lu << 16)
 #define CR0_PG_BIT  (1lu << 31)
 #define CR4_PAE_BIT (1lu << 5)
 #define CR4_PCIDE   (1lu << 17)

@@ -173,10 +174,12 @@ static void map_physical_memory(const struct stivale2_struct_tag_memmap* memmap)
     void* virtual_addr = translate_address((void *)phys_addr);

-    internal_map_pages(phys_addr,
-                       (uint64_t)virtual_addr,
-                       size,
-                       PRESENT_ENTRY | PL_XD);
+    internal_map_pages(
+        phys_addr,
+        (uint64_t)virtual_addr,
+        size,
+        PRESENT_ENTRY | PL_XD | PL_RW
+    );

     // use the allocator to allocate page tables
     // to map its own data
 }

@@ -236,18 +239,18 @@ static void map_kernel(const struct stivale2_struct_tag_memmap* memmap) {
         {
         case 0:
             /* .text */
+            flags |= PL_RW;
             break;
         case 1:
             /* rodata */
             flags |= PL_XD;
+            flags |= PL_RW;
             break;
         case 2:
+            flags |= PL_RW;
             /* data+bss */
             break;
         default:
             //modules: do not map in higher half!
+            flags |= PL_RW;
             virtual_addr = base | TRANSLATED_PHYSICAL_MEMORY_BEGIN;
             break;
         }
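
With WP enforced, every kernel section needs its permissions made explicit; the switch keys on the section index (0 = .text, 1 = rodata, 2 = data+bss, default = bootloader modules, mapped through the translated-physical window instead of the higher half). The general technique, shown here as a sketch rather than this repo's code, is to derive the page flags from the ELF segment's p_flags:

    #include <elf.h>
    #include <stdint.h>

    // Generic ELF p_flags -> x86-64 page flags (assumes the repo's
    // PRESENT_ENTRY / PL_RW / PL_XD macros; a sketch, not elf.c's code).
    static uint64_t phdr_to_flags(const Elf64_Phdr* ph) {
        uint64_t flags = PRESENT_ENTRY;
        if (ph->p_flags & PF_W)    flags |= PL_RW;  // writable segment
        if (!(ph->p_flags & PF_X)) flags |= PL_XD;  // non-executable segment
        return flags;
    }
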
@@ -318,10 +321,11 @@ void init_paging(const struct stivale2_struct_tag_memmap* memmap) {
     // 256th supervisor: 0xffff800000000000 -> 0xffff807fffffffff
     // 511st supervisor: 0xffffff8000000000 -> 0xffffffffffffffff
     pml4[0] = create_table_entry(
-        alloc_page_table(),    // alloc a new page table
-                               // with pmm
-        PRESENT_ENTRY          // execute enable, read
-                               // write for all the lower half
+        alloc_page_table(),    // alloc a new page table
+                               // with pmm
+        PRESENT_ENTRY | PL_US  // execute enable, read
+        | PL_RW                // write for all the lower half
+                               // and accessible from userspace
     );

     // the two high half memory regions are supervisor only
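
Granting PL_US | PL_RW at PML4 level is safe because x86-64 takes the intersection of permissions along the whole walk: a page is user-accessible only if U/S is set at every level, and, with CR0.WP=1, writable even for ring 0 only if R/W is set at every level. The permissive top-level entry just delegates the real policy to the leaf PTEs:

    #include <stdint.h>

    // Effective writability is the AND of bit 1 (R/W) across the walk.
    static int walk_writable(uint64_t pml4e, uint64_t pdpte,
                             uint64_t pde, uint64_t pte) {
        return ((pml4e & pdpte & pde & pte) >> 1) & 1;
    }
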
@@ -329,13 +333,13 @@ void init_paging(const struct stivale2_struct_tag_memmap* memmap) {
     // stays in the pml4 table
     pml4[256] = create_table_entry(
         alloc_page_table(),   // once again use the pmm
-        PRESENT_ENTRY | PL_US // supervisor flag the whole
+        PRESENT_ENTRY | PL_RW // supervisor only
     );

     // same as above
     pml4[511] = create_table_entry(
         alloc_page_table(),   // once again use the pmm
-        PRESENT_ENTRY | PL_US
+        PRESENT_ENTRY | PL_RW // supervisor only
     );

@@ -362,7 +366,7 @@ void append_paging_initialization(void) {
     set_cr4((get_cr4() | CR4_PAE_BIT) & ~CR4_PCIDE);

     // enable the PG bit
-    set_cr0(get_cr0() | CR0_PG_BIT);
+    set_cr0(get_cr0() | CR0_PG_BIT | CR0_WP);

     // enable NXE bit
     write_msr(IA32_EFER_MSR, read_msr(IA32_EFER_MSR) | IA32_EFER_NXE_BIT);
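
This is the root-cause line of the whole commit: CR0.WP (bit 16) makes the supervisor honor read-only pages, which is what exposed every missing PL_RW above. The surrounding constants are ISA-defined:

    // Architectural control bits used in this hunk:
    #define CR0_WP    (1ull << 16)  // ring 0 honors the R/W page bit
    #define CR0_PG    (1ull << 31)  // paging enable
    #define CR4_PAE   (1ull << 5)   // required for long-mode paging
    #define IA32_EFER 0xC0000080u   // EFER MSR number
    #define EFER_NXE  (1ull << 11)  // makes PL_XD (bit 63) effective
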
@@ -449,7 +453,7 @@ static void* get_entry_or_allocate(void** restrict table, unsigned index) {
         void* e = create_table_entry(
             alloc_page_table(),
-            PRESENT_ENTRY);
+            PRESENT_ENTRY | PL_US | PL_RW);

         return virtual_addr_table[index] = e;
     }
     else
