From 3f83577f8f3ad6aba0c87e79ea33cd4d904800a5 Mon Sep 17 00:00:00 2001
From: Morten Delenk
Date: Sun, 3 Sep 2017 09:56:58 +0100
Subject: [PATCH] added paging to x86

---
 kernel/arch/arm/flags.cmake      |   6 +-
 kernel/arch/x86/include/paging.h |  52 +++++
 kernel/arch/x86/paging.cpp       | 347 +++++++++++++++++++++++++++++++
 kernel/arch/x86/pc/flags.cmake   |   2 +-
 kernel/arch/x86/pc/start.cpp     |  10 +
 kernel/arch/x86/sourcegen.py     |   1 +
 kernel/hw/pc/config.py           |   1 +
 kernel/hw/pc/pmm/pmm.cpp         |   2 +-
 kernel/src/include/paging.hpp    |  32 +++
 kernel/src/include/pmm.hpp       |   7 +-
 kernel/src/paging.cpp            |  36 ++++
 kernel/src/pmm.cpp               |  15 +-
 12 files changed, 502 insertions(+), 9 deletions(-)
 create mode 100644 kernel/arch/x86/include/paging.h
 create mode 100644 kernel/arch/x86/paging.cpp
 create mode 100644 kernel/src/include/paging.hpp
 create mode 100644 kernel/src/paging.cpp

diff --git a/kernel/arch/arm/flags.cmake b/kernel/arch/arm/flags.cmake
index 7b70c3e..ea89365 100644
--- a/kernel/arch/arm/flags.cmake
+++ b/kernel/arch/arm/flags.cmake
@@ -1,3 +1,3 @@
-SET(PLATFORM_C_FLAGS "-I../../kernel/arch/arm/include")
-SET(PLATFORM_CXX_FLAGS "${PLATFORM_C_FLAGS}")
-SET(PLATFORM_ASM_FLAGS "${PLATFORM_C_FLAGS}")
+SET(ISA_C_FLAGS "-I../../kernel/arch/arm/include")
+SET(ISA_CXX_FLAGS "${ISA_C_FLAGS}")
+SET(ISA_ASM_FLAGS "${ISA_C_FLAGS}")
diff --git a/kernel/arch/x86/include/paging.h b/kernel/arch/x86/include/paging.h
new file mode 100644
index 0000000..2fa0dbc
--- /dev/null
+++ b/kernel/arch/x86/include/paging.h
@@ -0,0 +1,52 @@
+#pragma once
+#include
+#include
+#include
+#include
+struct pagedir {
+    bool active:1;
+    bool writeable:1;
+    bool privileged:1;
+    bool no_write_cache:1;
+    bool no_read_cache:1;
+    bool accessed:1;
+    bool ignored:1;
+    bool size:1;
+    bool ignored2:1;
+    uint8_t ignored3:3;
+    phys_t pagetable:20;
+}__attribute__((packed));
+static_assert(sizeof(pagedir)==4);
+
+struct pagetbl {
+    bool active:1;
+    bool writeable:1;
+    bool privileged:1;
+    bool no_write_cache:1;
+    bool no_read_cache:1;
+    bool accessed:1;
+    bool written:1;
+    bool pat:1;
+    bool no_flush_after_cr3:1;
+    uint8_t ignored:2;
+    bool lazy:1;
+    phys_t page:20;
+}__attribute__((packed));
+static_assert(sizeof(pagetbl)==4);
+
+struct paging_context_x86: public paging_context {
+    phys_t pagedir_addr;
+    paging_context_x86();
+    virtual ~paging_context_x86();
+    virtual void switch_context();
+    virtual void map_pagetable(struct paging_context *);
+    virtual void *mmap(phys_t addr, void *dest, protection prot=protection::RW, bool lazy=true);
+    virtual void munmap(void *addr);
+    virtual bool is_mapped(void *addr);
+    virtual void mprotect(void *addr, protection prot);
+    virtual bool has_no_exec();
+    virtual void *get_exception_address(cpu_state *cpu);
+    virtual bool lazymap(void *addr, cpu_state *cpu);
+    pagedir * operator*();
+    pagetbl * operator[](phys_t);
+};
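The two structures above encode the classic 32-bit two-level scheme: a 1024-entry page directory whose entries point at 1024-entry page tables, each of which maps 4 KiB pages. A virtual address is consumed as 10 bits of directory index, 10 bits of table index and a 12-bit page offset. The snippet below only restates that split for reference; the helper is illustrative and not part of the patch.

    // Illustrative only. Shows how a 32-bit virtual address is split by the
    // pagedir/pagetbl structures above.
    #include <stdint.h>

    struct vaddr_parts {
        uint32_t dir;    // index into the 1024-entry page directory
        uint32_t table;  // index into the 1024-entry page table
        uint32_t offset; // byte offset inside the 4 KiB page
    };

    static inline vaddr_parts split_vaddr(uint32_t va) {
        return { va >> 22, (va >> 12) & 0x3FF, va & 0xFFF };
    }

    // Example: 0xC0100234 -> dir 0x300, table 0x100, offset 0x234.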
diff --git a/kernel/arch/x86/paging.cpp b/kernel/arch/x86/paging.cpp
new file mode 100644
index 0000000..2c77eda
--- /dev/null
+++ b/kernel/arch/x86/paging.cpp
@@ -0,0 +1,347 @@
+#include
+#include
+extern int kernel_start;
+extern int kernel_end;
+paging_context_x86::paging_context_x86(): paging_context() {
+    *pmm >> pagedir_addr;
+    if(context_enabled) {
+        //Copy System section (first 256 entries) over to the new context
+        current_context->mmap(pagedir_addr, (void*)0x401000, protection::RW, false);
+        pagedir *src=(pagedir *)0x400000;
+        pagedir *dest=(pagedir *)0x401000;
+        for(int i=0;i<256;i++)
+            dest[i]=src[i];
+        for(int i=256;i<1024;i++)
+            dest[i].active=false;
+        //remap self-map correctly
+        dest[1].active=false;
+        dest[2].active=false;
+        dest[3].active=false;
+        phys_t selfmap_meta;
+        *pmm >> selfmap_meta;
+
+        dest[1].pagetable = selfmap_meta >> 12;
+        dest[1].writeable = true;
+        dest[1].privileged = false;
+        dest[1].no_write_cache = true;
+        dest[1].no_read_cache = false;
+        dest[1].size = false;
+        dest[1].active = true;
+
+        phys_t selfmap;
+        *pmm >> selfmap;
+        dest[2].pagetable = selfmap >> 12;
+        dest[2].writeable = true;
+        dest[2].privileged = false;
+        dest[2].no_write_cache = true;
+        dest[2].no_read_cache = false;
+        dest[2].size = false;
+        dest[2].active = true;
+
+        current_context->map_pagetable(this);
+
+        //Remap all parts of the system
+        for(int i=0;i<256;i++) {
+            if(!src[i].active)
+                continue;
+            if(i == 1 || i == 2 || i == 3)
+                continue;
+            pagetbl *pagetbl_start = (pagetbl *)(0x800000 + (i << 12));
+            for(int j=0;j<1024;j++) {
+                if(!pagetbl_start[j].active)
+                    continue;
+                mmap(pagetbl_start[j].page << 12, pagetbl_start+1024, protection::RW, false);
+
+            }
+        }
+        return;
+    }
+    //Map the entire kernel
+    pagedir *context=(pagedir *)pagedir_addr;
+    for(int i=0;i<1024;i++)
+        context[i].active=false;
+    phys_t kernel_tbl;
+    *pmm >> kernel_tbl;
+    context[0].pagetable=kernel_tbl >> 12;
+    context[0].writeable=true;
+    context[0].privileged=false;
+    context[0].no_write_cache=false;
+    context[0].no_read_cache=false;
+    context[0].size=false;
+    context[0].active=true;
+
+    pagetbl *kerneltbl = (pagetbl *)kernel_tbl;
+    for(int i=0;i<1024;i++)
+        kerneltbl[i].active=false;
+    for(phys_t i=((phys_t)&kernel_start)>>12,p = (phys_t)&kernel_start; p < (phys_t)&kernel_end; p+=0x1000, i++) {
+        kerneltbl[i].page=i;
+        kerneltbl[i].writeable=true;
+        kerneltbl[i].privileged=false;
+        kerneltbl[i].no_write_cache=false;
+        kerneltbl[i].no_read_cache=false;
+        kerneltbl[i].pat=false;
+        kerneltbl[i].no_flush_after_cr3=true;
+        kerneltbl[i].active=true;
+    }
+
+    //self-mapping page directory
+    phys_t pdir_selfmap_addr;
+    *pmm >> pdir_selfmap_addr;
+    context[1].pagetable = pdir_selfmap_addr >> 12;
+    context[1].writeable=true;
+    context[1].privileged=false;
+    context[1].no_write_cache=true;
+    context[1].no_read_cache=false;
+    context[1].size=false;
+    context[1].active=true;
+    pagetbl *pdir_selfmap=(pagetbl *)pdir_selfmap_addr;
+    for(int i=0;i<1024;i++) {
+        pdir_selfmap[i].active=false;
+        pdir_selfmap[i].lazy=false;
+    }
+
+    pdir_selfmap[0].page=pagedir_addr >> 12;
+    pdir_selfmap[0].writeable=true;
+    pdir_selfmap[0].privileged=false;
+    pdir_selfmap[0].no_write_cache=true;
+    pdir_selfmap[0].no_read_cache=false;
+    pdir_selfmap[0].pat=false;
+    pdir_selfmap[0].no_flush_after_cr3=false;
+    pdir_selfmap[0].active=true;
+
+    phys_t ptbl_selfmap_addr;
+    *pmm >> ptbl_selfmap_addr;
+    context[2].pagetable = ptbl_selfmap_addr>>12;
+    context[2].writeable=true;
+    context[2].privileged=false;
+    context[2].no_write_cache=true;
+    context[2].no_read_cache=false;
+    context[2].size=false;
+    context[2].active=true;
+    pagetbl *ptbl_selfmap=(pagetbl *)ptbl_selfmap_addr;
+    for(int i=0;i<1024;i++) {
+        ptbl_selfmap[i].active=false;
+        ptbl_selfmap[i].lazy=false;
+    }
+
+
+    //Create a self-map
+    for(int i=0;i<1024;i++) {
+        if(!context[i].active)
+            continue;
+        ptbl_selfmap[i].page = context[i].pagetable;
+        ptbl_selfmap[i].writeable=true;
+        ptbl_selfmap[i].privileged=false;
+        ptbl_selfmap[i].no_write_cache=true;
+        ptbl_selfmap[i].no_read_cache=false;
+        ptbl_selfmap[i].pat=false;
+        ptbl_selfmap[i].no_flush_after_cr3=false;
+        ptbl_selfmap[i].active=true;
+    }
+}
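Throughout this file a few fixed kernel-space windows are assumed: directory slot 1 holds the page-directory self-map (the running context's directory at 0x400000, a foreign one at 0x401000), slot 2 exposes the running context's page tables at 0x800000, and slot 3 (0xC00000) is where map_pagetable() places another context's page tables. The constants below only restate that convention; they mirror operator*() and operator[]() further down and are not part of the patch.

    // Illustrative restatement of the self-map windows used in paging.cpp.
    #include <stdint.h>

    constexpr uintptr_t OWN_PAGEDIR        = 0x400000; // current context's page directory
    constexpr uintptr_t FOREIGN_PAGEDIR    = 0x401000; // another context's page directory
    constexpr uintptr_t OWN_PAGETABLES     = 0x800000; // current context's page tables
    constexpr uintptr_t FOREIGN_PAGETABLES = 0xC00000; // another context's page tables

    // Page table for directory slot i, seen from the running context:
    //     (pagetbl *)(OWN_PAGETABLES + (i << 12))
    // and, after map_pagetable(), for a foreign context:
    //     (pagetbl *)(FOREIGN_PAGETABLES + (i << 12))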
+
+paging_context_x86::~paging_context_x86() {
+    //Deallocate user space
+    pagedir *pd = (pagedir*)0x401000;
+    for(int i=256;i<1024;i++) {
+        if(!pd[i].active)
+            continue;
+        pagetbl *pt = (pagetbl*)(0xC00000+(i<<12));
+        for(int j=0;j<1024;j++) {
+            if(!pt[j].active)
+                continue;
+            void * addr = (void *)((i<<22)+(j<<12));
+            munmap(addr);
+        }
+        munmap(pt);
+        *pmm << (pd[i].pagetable << 12);
+    }
+    //Deallocate selfmap
+    for(int i=1;i<3;i++) {
+        if(!pd[i].active)
+            continue;
+        pagetbl *pt = (pagetbl*)(0xC00000+(i<<12));
+        for(int j=0;j<1024;j++) {
+            if(!pt[j].active)
+                continue;
+            void * addr = (void *)((i<<22)+(j<<12));
+            munmap(addr);
+        }
+        munmap(pt);
+        *pmm << (pd[i].pagetable << 12);
+    }
+    munmap(pd);
+    *pmm << pagedir_addr;
+}
+
+void paging_context_x86::switch_context() {
+    asm volatile("mov %0, %%cr3" :: "r"(pagedir_addr) : "memory");
+    if(!context_enabled) {
+        uint32_t cr0;
+        asm volatile("mov %%cr0, %0" : "=r"(cr0));
+        cr0 |= (1<<31);
+        asm volatile("mov %0, %%cr0" :: "r"(cr0) : "memory");
+    }
+    paging_context::switch_context();
+}
+
+void paging_context_x86::map_pagetable(struct paging_context *pc_t) {
+    paging_context_x86 *pc = (paging_context_x86*)pc_t;
+    mmap(pc->pagedir_addr, (void*)0x401000, protection::RW, false);
+    pagedir * pd = (pagedir*)0x401000;
+    for(int i=0;i<1024;i++) {
+        if(!pd[i].active)
+            continue;
+        mmap(pd[i].pagetable<<12, (void*)(0xC00000+(i<<12)), protection::RW, false);
+    }
+}
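For orientation before the mmap() implementation: the physical address may be 0 to request a fresh frame (for non-lazy mappings), dest may be nullptr to let the context pick a free virtual address, and lazy defers frame allocation to the first page fault. A usage sketch, assuming the main_context instance declared later in start.cpp and an arbitrary example device address:

    // Map one page of a device (example physical address) read/write and let
    // the context choose the virtual address.
    void *regs = main_context.mmap(0xFEC00000, nullptr, protection::RW, false);

    // Reserve an anonymous page lazily; the frame is only allocated once the
    // page-fault path calls lazymap().
    void *buf = main_context.mmap(0, nullptr, protection::RW, true);

    // Tear both mappings down again.
    main_context.munmap(regs);
    main_context.munmap(buf);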
+
+void *paging_context_x86::mmap(phys_t addr, void *dest, protection prot, bool lazy) {
+    if((!addr)&&(!lazy)) {
+        *pmm >> addr;
+    }
+    if(!dest) {
+        if(!lazy) {
+            for(phys_t i=16; i < 256; i++) {
+                if((**this)[i].active) {
+                    for(phys_t j=0; j<1024; j++) {
+                        if((*this)[i][j].active)
+                            continue;
+                        dest = (void*)((i<<22)|(j<<12));
+                    }
+                    continue;
+                }
+                dest = (void*)(i<<22);
+            }
+        } else {
+            for(phys_t i=256; i<1024; i++) {
+                if((**this)[i].active) {
+                    for(phys_t j=0; j<1024; j++) {
+                        if((*this)[i][j].active)
+                            continue;
+                        if((*this)[i][j].lazy)
+                            continue;
+                        dest = (void*)((i<<22)|(j<<12));
+                    }
+                    continue;
+                }
+                dest = (void*)(i<<22);
+            }
+        }
+    }
+    if(!dest) {
+        panic("Not enough virtual memory is available for mmap()");
+    }
+    phys_t i=((phys_t)dest)>>22;
+    phys_t j=((phys_t)dest)>>12;
+    j&=0x3FF;
+    if(!(**this)[i].active) {
+        phys_t ptbl_addr;
+        *pmm >> ptbl_addr;
+        mmap(ptbl_addr, (*this)[i], protection::RW, false);
+        pagetbl *pent = (*this)[i];
+        for(int k=0;k<1024;k++)
+            pent[k].active=false;
+        pagedir *ptbl = &((**this)[i]);
+        ptbl->pagetable = ptbl_addr >> 12;
+        ptbl->writeable = true;
+        ptbl->privileged = (i < 256) ? false : true;
+        ptbl->no_write_cache = (i < 16) ? true : false;
+        ptbl->no_read_cache = false;
+        ptbl->size = false;
+        ptbl->active=true;
+    }
+    pagetbl *pent = (*this)[i] + j;
+    uint8_t p = (uint8_t)prot;
+    pent->page = addr >> 12;
+    pent->writeable = (p & 2) ? true : false;
+    pent->privileged = (p & 4) ? true : false;
+    pent->no_write_cache = (i < 16) ? true : false;
+    pent->no_read_cache = false;
+    pent->pat = false;
+    pent->no_flush_after_cr3 = false;
+    pent->lazy = lazy;
+    pent->active = !lazy;
+    if(current_context == this) {
+        asm volatile("invlpg %0" :: "m"(*(char*)dest) : "memory");
+    }
+    return dest;
+}
+
+void paging_context_x86::munmap(void *addr) {
+    phys_t i=((phys_t)addr)>>22;
+    phys_t j=((phys_t)addr)>>12;
+    j &= 0x3FF;
+    if(!(**this)[i].active)
+        return;
+    if(!(*this)[i][j].active) {
+        (*this)[i][j].lazy = false;
+        return;
+    }
+    *pmm << ((*this)[i][j].page << 12);
+    (*this)[i][j].active=false;
+}
+
+bool paging_context_x86::is_mapped(void *addr) {
+    phys_t i=((phys_t)addr)>>22;
+    phys_t j=((phys_t)addr)>>12;
+    j &= 0x3FF;
+    if(!(**this)[i].active)
+        return false;
+    return (*this)[i][j].active;
+}
+
+void paging_context_x86::mprotect(void *addr, protection prot) {
+    phys_t i=((phys_t)addr)>>22;
+    phys_t j=((phys_t)addr)>>12;
+    j &= 0x3FF;
+    if(!(**this)[i].active)
+        return;
+    if((!(*this)[i][j].active) && (!(*this)[i][j].lazy))
+        return;
+    pagetbl *pent= &((*this)[i][j]);
+    uint8_t p = (uint8_t)prot;
+    pent->writeable = (p & 2) ? true : false;
+    pent->privileged = (p & 4) ? true : false;
+    if(current_context == this)
+        asm volatile("invlpg %0" :: "m"(*(char*)addr) : "memory");
+}
+
+bool paging_context_x86::has_no_exec() {
+    return false;
+}
+
+void *paging_context_x86::get_exception_address(cpu_state *cpu) {
+    uint32_t cr2;
+    asm volatile("mov %%cr2, %0" : "=r"(cr2));
+    return (void*)cr2;
+}
+
+bool paging_context_x86::lazymap(void *addr, cpu_state *cpu) {
+    phys_t i=((phys_t)addr)>>22;
+    phys_t j=((phys_t)addr)>>12;
+    j &= 0x3FF;
+    if(!(**this)[i].active)
+        return false;
+    if((*this)[i][j].active)
+        return false;
+    if(!(*this)[i][j].lazy)
+        return false;
+    phys_t page;
+    *pmm >> page;
+    pagetbl *pent = &((*this)[i][j]);
+    pent->page = page >> 12;
+    pent->lazy = false;
+    pent->active = true;
+    return true;
+}
+
+pagedir * paging_context_x86::operator*() {
+    if(current_context == this)
+        return (pagedir*)0x400000;
+    return (pagedir*)0x401000;
+}
+
+pagetbl * paging_context_x86::operator[](phys_t i) {
+    if(current_context == this)
+        return (pagetbl *)(0x800000+(i<<12));
+    return (pagetbl *)(0xC00000+(i<<12));
+}
diff --git a/kernel/arch/x86/pc/flags.cmake b/kernel/arch/x86/pc/flags.cmake
index 1ef2bef..f8f7c29 100644
--- a/kernel/arch/x86/pc/flags.cmake
+++ b/kernel/arch/x86/pc/flags.cmake
@@ -1,3 +1,3 @@
-SET(PLATFORM_C_FLAGS "-I../../kernel/arch/x86/pc/include -O2")
+SET(PLATFORM_C_FLAGS "-I../../kernel/arch/x86/pc/include -O0")
 SET(PLATFORM_CXX_FLAGS "${PLATFORM_C_FLAGS}")
 SET(PLATFORM_ASM_FLAGS "${PLATFORM_C_FLAGS}")
diff --git a/kernel/arch/x86/pc/start.cpp b/kernel/arch/x86/pc/start.cpp
index ff3a5a4..bc49c54 100644
--- a/kernel/arch/x86/pc/start.cpp
+++ b/kernel/arch/x86/pc/start.cpp
@@ -8,6 +8,7 @@
 #include "../../../hw/pc/8259/pic.hpp"
 #include "../../../hw/pc/idt/idt.hpp"
 #include "../../../hw/pc/pmm/pmm.hpp"
+#include
 #include
 
 static multiboot_info_t *mb_info;
@@ -17,6 +18,8 @@ CGATerm term;
 VESAfb term(mb_info);
 #endif
 PMM_MB lpmm(mb_info);
+
+paging_context_x86 main_context;
 void main();
 extern "C" void start(int eax, multiboot_info_t *ebx) {
     mb_info = ebx;
@@ -27,6 +30,13 @@ void drivers_init() {
     setMainTTY(&term);
     --term;
     initIDT();
+    main_context.switch_context();
+    main_context.mmap((phys_t)mb_info, mb_info, protection::RW, false);
+    phys_t start_fb = (phys_t)(mb_info->framebuffer_addr);
+    phys_t end_fb = start_fb + mb_info->framebuffer_height * mb_info->framebuffer_pitch;
+    for(phys_t i=start_fb; i<end_fb; i+=0x1000)
+        main_context.mmap(i, (void*)i, protection::RW, false);
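The lazy mappings above only become useful once a page-fault handler forwards faults into lazymap(); that handler is not part of this patch. A minimal sketch of how it could be wired up, with the handler name and its registration purely assumed:

    // Hypothetical page-fault hook, not added by this patch.
    extern "C" void page_fault_handler(cpu_state *cpu) {
        void *addr = current_context->get_exception_address(cpu); // reads CR2
        if(current_context->lazymap(addr, cpu))
            return; // a lazily reserved page was just backed by a frame; retry
        panic("unhandled page fault");
    }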
diff --git a/kernel/arch/x86/sourcegen.py b/kernel/arch/x86/sourcegen.py
--- a/kernel/arch/x86/sourcegen.py
+++ b/kernel/arch/x86/sourcegen.py
+    reg_struct.write("#include \n")
     if config["ENABLE_FPU"] and not config["ENABLE_SSE"]:
         reg_struct.write("struct fpu_state {\n")
diff --git a/kernel/hw/pc/config.py b/kernel/hw/pc/config.py
index bd9a8dc..c4659c9 100644
--- a/kernel/hw/pc/config.py
+++ b/kernel/hw/pc/config.py
@@ -9,3 +9,4 @@ else:
     add_driver(False, "8259")
 add_driver(False, "idt")
 add_driver(False, "pmm")
+
diff --git a/kernel/hw/pc/pmm/pmm.cpp b/kernel/hw/pc/pmm/pmm.cpp
index 35a3d7f..192c3a2 100644
--- a/kernel/hw/pc/pmm/pmm.cpp
+++ b/kernel/hw/pc/pmm/pmm.cpp
@@ -28,7 +28,7 @@ auto PMM_MB::isFree(phys_t addr) -> bool {
     }
     if(!free)
         return false;
-    if(addr == (phys_t)((uintptr_t)mb_info))
+    if((addr >= (phys_t)((uintptr_t)mb_info)) && (addr < (phys_t)((uintptr_t)mb_info)+0x1000))
         return false;
     return PMM::isFree(addr);
 }
diff --git a/kernel/src/include/paging.hpp b/kernel/src/include/paging.hpp
new file mode 100644
index 0000000..4f90053
--- /dev/null
+++ b/kernel/src/include/paging.hpp
@@ -0,0 +1,32 @@
+#pragma once
+#include
+#include
+#include
+
+enum class protection: uint8_t {
+    NONE=0,
+    X=1,
+    W=2,
+    WX=3,
+    R=4,
+    RX=5,
+    RW=6,
+    RWX=7
+};
+
+struct paging_context {
+    paging_context();
+    virtual ~paging_context();
+    virtual void switch_context();
+    virtual void map_pagetable(struct paging_context *);
+    virtual void* mmap(phys_t addr, void* dest, protection prot=protection::RW, bool lazy=true);
+    virtual void munmap(void* addr);
+    virtual bool is_mapped(void* addr);
+    virtual void mprotect(void* addr, protection prot);
+    virtual bool has_no_exec();
+    virtual void *get_exception_address(cpu_state *cpu);
+    virtual bool lazymap(void* addr, cpu_state *cpu);
+};
+
+extern paging_context *current_context;
+extern bool context_enabled;
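protection is a 3-bit mask (X=1, W=2, R=4), which is why the x86 backend tests p & 2 for the writeable bit and p & 4 for the user/privileged bit. A small reference sketch (illustrative, not part of the patch):

    static_assert(((uint8_t)protection::RW & (uint8_t)protection::W) != 0);
    static_assert((uint8_t)protection::RWX == 7);

    // Typical calls against the generic interface:
    //     ctx->mmap(frame, nullptr, protection::RW, false);
    //     ctx->mprotect(addr, protection::R);   // drops the writeable bit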
diff --git a/kernel/src/include/pmm.hpp b/kernel/src/include/pmm.hpp
index 0d5510a..5327b4b 100644
--- a/kernel/src/include/pmm.hpp
+++ b/kernel/src/include/pmm.hpp
@@ -20,10 +20,11 @@ class PMM {
     PMM(phys_t page_size);
     virtual ~PMM();
     virtual auto operator<<(phys_t page) -> PMM &; ///< Frees a page. O(1)
-    virtual auto operator>>(phys_t &page) -> PMM &; ///< Allocates a page. O(1)
-    virtual auto operator,(size_t no_pages) -> phys_t; ///< Allocates multiple pages. O(n²)
+    virtual auto operator>>(phys_t &page) -> PMM &; ///< Allocates a page. Probably O(log n)
+    virtual auto operator,(size_t no_pages) -> phys_t; ///< Allocates multiple pages. O(n)
     virtual auto operator()(phys_t pages,size_t no_pages) -> void; ///< Deallocates multiple pages. O(n)
-    virtual auto operator&&(phys_t page) -> bool; //Returns true if this page is free. O(n).
+    virtual auto operator&&(phys_t page) -> bool; //Returns true if this page is free. O(1).
+    virtual auto setUsed(phys_t page) -> void; //Marks a page as used. O(1).
 };
 /**
  * This definition is for having a python-like syntax - like `page in pmm`
diff --git a/kernel/src/paging.cpp b/kernel/src/paging.cpp
new file mode 100644
index 0000000..5efb294
--- /dev/null
+++ b/kernel/src/paging.cpp
@@ -0,0 +1,36 @@
+#include
+paging_context *current_context(nullptr);
+bool context_enabled=false;
+paging_context::paging_context() {}
+paging_context::~paging_context() {}
+
+void paging_context::switch_context() {
+    current_context = this;
+    context_enabled = true;
+}
+
+void paging_context::map_pagetable(struct paging_context *) {}
+
+void* paging_context::mmap(phys_t addr, void* dest, protection prot, bool lazy) {
+    return nullptr;
+}
+
+void paging_context::munmap(void *addr) {}
+
+bool paging_context::is_mapped(void *addr) {
+    return false;
+}
+
+void paging_context::mprotect(void *addr, protection prot) {}
+
+bool paging_context::has_no_exec() {
+    return false;
+}
+
+void *paging_context::get_exception_address(cpu_state *cpu) {
+    return nullptr;
+}
+
+bool paging_context::lazymap(void *addr, cpu_state *cpu) {
+    return false;
+}
diff --git a/kernel/src/pmm.cpp b/kernel/src/pmm.cpp
index 22b8157..f0e85df 100644
--- a/kernel/src/pmm.cpp
+++ b/kernel/src/pmm.cpp
@@ -16,7 +16,12 @@ auto PMM::isFree(phys_t addr) -> bool {
         return false;
     return true;
 }
-PMM::PMM(phys_t page_size): page_size(page_size), first_free(0), lowest_page(~0), highest_page(0) {}
+PMM::PMM(phys_t page_size): page_size(page_size), first_free(0), lowest_page(~0), highest_page(0) {
+    for(int i=0;i<32768;i++) {
+        pmm_bitmap[i]=0;
+    }
+    pmm = this;
+}
 void PMM::fill() {
     for(phys_t i=highest_page; i>=lowest_page && i; i-=page_size) {
         if(isFree(i))
@@ -97,6 +102,8 @@ auto PMM::operator()(phys_t pages, size_t no_pages) -> void {
+    if(pages < first_free)
+        first_free = pages;
     for(size_t i=0; i<no_pages; i++) {
         phys_t pageno = pages/page_size + i;
         phys_t index = pageno >> 5;
         phys_t bit = pageno & 31;
         pmm_bitmap[index] |= 1<<bit;
@@ -106,4 +113,10 @@ auto PMM::operator&&(phys_t page) -> bool {
         return false;
     return true;
 }
+auto PMM::setUsed(phys_t page) -> void {
+    phys_t pageno = page / page_size;
+    phys_t index = pageno >> 5;
+    phys_t bit = pageno & 31;
+    pmm_bitmap[index] &= ~(1<<bit);
+}
 auto operator&&(PMM &mm, phys_t a) -> bool { return mm && a; }
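The new setUsed() uses the same bitmap addressing as the rest of the allocator: one bit per physical page, a set bit meaning the page is free, pageno >> 5 selecting the 32-bit word and pageno & 31 the bit within it. A self-contained sketch of that arithmetic (names are illustrative, not the kernel's):

    #include <stdint.h>

    constexpr uint32_t PAGE_SIZE = 0x1000;
    static uint32_t bitmap[32768]; // 32768 words * 32 bits * 4 KiB pages = 4 GiB

    static inline void mark_free(uint32_t addr) {
        uint32_t pageno = addr / PAGE_SIZE;
        bitmap[pageno >> 5] |= 1u << (pageno & 31);
    }

    static inline void mark_used(uint32_t addr) {
        uint32_t pageno = addr / PAGE_SIZE;
        bitmap[pageno >> 5] &= ~(1u << (pageno & 31));
    }

    static inline bool is_free(uint32_t addr) {
        uint32_t pageno = addr / PAGE_SIZE;
        return bitmap[pageno >> 5] & (1u << (pageno & 31));
    }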