acadia/zion/memory/paging_util.cpp
Drew Galbraith 6f5b65de30 Map user pages in the proper mode.
This causes the user code to execute successfully.
However now we don't differentiate between syscalls so we pass right
over the exit syscall and continue executing until we fault.
2023-05-29 13:06:08 -07:00

159 lines
4.6 KiB
C++

#include "memory/paging_util.h"
#include "boot/boot_info.h"
#include "debug/debug.h"
#include "memory/physical_memory.h"
// Page-table entry flag bits (same layout at every paging level).
#define PRESENT_BIT 0x1
#define READ_WRITE_BIT 0x2
// FIX: dropped a stray trailing ';' from this macro. It previously expanded
// to "0x4;", which only compiled because the single use site happened to be
// statement-final; any expression use (e.g. `a | USER_MODE_BIT | b`) would
// have broken.
#define USER_MODE_BIT 0x4
#define SIGN_EXT 0xFFFF0000'00000000
// PML4 slot used for the recursive mapping (entry 510).
#define RECURSIVE ((uint64_t)0x1FE)
// Bit position of each paging-structure index within a virtual address.
#define PML_OFFSET 39
#define PDP_OFFSET 30
#define PD_OFFSET 21
#define PT_OFFSET 12
// How to recursively index into each page table structure assuming
// the PML4 is recursively mapped at the 510th entry (0x1FE).
#define PML_RECURSE 0xFFFFFF7F'BFDFE000
#define PDP_RECURSE 0xFFFFFF7F'BFC00000
#define PD_RECURSE 0xFFFFFF7F'80000000
#define PT_RECURSE 0xFFFFFF00'00000000
namespace {
// Rounds |addr| down to the nearest 4 KiB page boundary.
uint64_t PageAlign(uint64_t addr) { return addr - (addr & 0xFFF); }
// Pointer overload: rounds |addr| down to its 4 KiB page boundary.
uint64_t* PageAlign(uint64_t* addr) {
  const uint64_t raw = reinterpret_cast<uint64_t>(addr);
  return reinterpret_cast<uint64_t*>(raw & ~0xFFF);
}
// Writes zero to all 512 64-bit entries of the 4 KiB page containing |ptr|.
void ZeroOutPage(uint64_t* ptr) {
  // Align down to the page boundary (inline equivalent of PageAlign).
  uint64_t* entry =
      reinterpret_cast<uint64_t*>(reinterpret_cast<uint64_t>(ptr) & ~0xFFF);
  for (uint64_t* end = entry + 512; entry != end; ++entry) {
    *entry = 0;
  }
}
// Strips the canonical sign-extension bits (48-63) from |addr|, then turns
// the paging-structure index that starts at bit |offset| into a byte offset
// (each table entry is 8 bytes, hence the multiply by 8).
uint64_t ShiftForEntryIndexing(uint64_t addr, uint64_t offset) {
  const uint64_t canonical = addr & 0x0000FFFF'FFFFFFFF;
  return (canonical >> offset) * 8;
}
// Returns a pointer to the PML4 entry covering |addr|, reachable through
// the recursive mapping window.
uint64_t* Pml4Entry(uint64_t addr) {
  const uint64_t entry = PML_RECURSE | ShiftForEntryIndexing(addr, PML_OFFSET);
  return reinterpret_cast<uint64_t*>(entry);
}
// Returns a pointer to the PDP entry covering |addr|, reachable through
// the recursive mapping window.
uint64_t* PageDirectoryPointerEntry(uint64_t addr) {
  const uint64_t entry = PDP_RECURSE | ShiftForEntryIndexing(addr, PDP_OFFSET);
  return reinterpret_cast<uint64_t*>(entry);
}
// Returns a pointer to the page-directory entry covering |addr|, reachable
// through the recursive mapping window.
uint64_t* PageDirectoryEntry(uint64_t addr) {
  const uint64_t entry = PD_RECURSE | ShiftForEntryIndexing(addr, PD_OFFSET);
  return reinterpret_cast<uint64_t*>(entry);
}
// Returns a pointer to the leaf page-table entry covering |addr|, reachable
// through the recursive mapping window.
uint64_t* PageTableEntry(uint64_t addr) {
  const uint64_t entry = PT_RECURSE | ShiftForEntryIndexing(addr, PT_OFFSET);
  return reinterpret_cast<uint64_t*>(entry);
}
// True if the PML4 entry covering |addr| is marked present (i.e. a PDP
// table exists for this address).
bool PageDirectoryPointerLoaded(uint64_t addr) {
  return (*Pml4Entry(addr) & PRESENT_BIT) != 0;
}
// True if a present page directory exists for |addr|. Checks the parent
// level first so we never dereference a recursive window backed by a
// non-present table.
bool PageDirectoryLoaded(uint64_t addr) {
  if (!PageDirectoryPointerLoaded(addr)) {
    return false;
  }
  return (*PageDirectoryPointerEntry(addr) & PRESENT_BIT) != 0;
}
// True if a present page table exists for |addr| (parent levels checked
// first to keep the recursive-window dereference safe).
bool PageTableLoaded(uint64_t addr) {
  if (!PageDirectoryLoaded(addr)) {
    return false;
  }
  return (*PageDirectoryEntry(addr) & PRESENT_BIT) != 0;
}
// Maps the 4 KiB page containing |virt| to the physical frame |phys|,
// allocating any missing intermediate paging structures (PDP, PD, PT) on
// the way down. Panics if |virt| is already mapped. The newly mapped page
// is zeroed through its virtual address before returning.
// NOTE(review): relies on the active PML4 being recursively mapped at
// entry 0x1FE (see InitPaging/InitializePml4) so the *_RECURSE windows
// resolve; each table entry must be installed BEFORE zeroing the new
// table through its recursive window — do not reorder.
void MapPage(uint64_t virt, uint64_t phys) {
if (PageLoaded(virt)) {
panic("Allocating Over Existing Page: %m", virt);
}
// The same access bits are applied to the leaf and to every intermediate
// table entry created below.
uint64_t access_bits = PRESENT_BIT | READ_WRITE_BIT;
uint64_t higher_half = 0xffff8000'00000000;
// Addresses below the higher-half split are user space. User code can only
// touch a page if the user bit is set at every level of the walk, so it is
// OR'd into all entries created here.
if ((virt & higher_half) != higher_half) {
access_bits |= USER_MODE_BIT;
}
// Allocate a PDP table if the PML4 entry is empty, then zero the fresh
// table through its recursive-mapping address (valid only after the PML4
// entry above was written).
if (!PageDirectoryPointerLoaded(virt)) {
uint64_t page = phys_mem::AllocatePage();
*Pml4Entry(virt) = page | access_bits;
ZeroOutPage(PageDirectoryPointerEntry(virt));
}
// Likewise for the page directory...
if (!PageDirectoryLoaded(virt)) {
uint64_t page = phys_mem::AllocatePage();
*PageDirectoryPointerEntry(virt) = page | access_bits;
ZeroOutPage(PageDirectoryEntry(virt));
}
// ...and the page table.
if (!PageTableLoaded(virt)) {
uint64_t page = phys_mem::AllocatePage();
*PageDirectoryEntry(virt) = page | access_bits;
ZeroOutPage(PageTableEntry(virt));
}
// Install the leaf mapping (frame address page-aligned, flag bits OR'd in)
// and hand back a zeroed page.
*PageTableEntry(virt) = PageAlign(phys) | access_bits;
ZeroOutPage(reinterpret_cast<uint64_t*>(virt));
}
// Extracts the 9-bit PML4 index (bits 39-47) of |addr|.
uint64_t Pml4Index(uint64_t addr) { return (addr >> PML_OFFSET) % 512; }
} // namespace
// Installs the recursive mapping into the currently loaded PML4 (the one
// handed to us by the bootloader): reads the PML4's physical address from
// CR3 and writes it back into entry 0x1FE 
// of the PML4 itself, enabling the *_RECURSE windows used above.
// NOTE(review): assumes the CR3 flag bits (PWT/PCD) are clear so the value
// can be used directly as a physical address — confirm against boot setup.
void InitPaging() {
uint64_t pml4_addr = 0;
asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr));
// CR3 holds a physical address; write to it through the bootloader's
// higher-half direct map.
uint64_t* pml4_virtual =
reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() + pml4_addr);
uint64_t recursive_entry = pml4_addr | PRESENT_BIT | READ_WRITE_BIT;
pml4_virtual[0x1FE] = recursive_entry;
}
// Seeds a fresh PML4 (at physical address |pml4_physical_addr|, e.g. for a
// new address space) with the entries every address space must share: its
// own recursive entry, the kernel mapping, and the higher-half direct map.
// The kernel and HHDM entries are copied from the *currently loaded*
// address space via the recursive mapping, so this must run with a
// recursively mapped PML4 active (see InitPaging).
void InitializePml4(uint64_t pml4_physical_addr) {
// The new PML4 is not mapped yet; write to it through the HHDM.
uint64_t* pml4_virtual = reinterpret_cast<uint64_t*>(
boot::GetHigherHalfDirectMap() + pml4_physical_addr);
// Map the recursive entry.
uint64_t recursive_entry = pml4_physical_addr | PRESENT_BIT | READ_WRITE_BIT;
pml4_virtual[0x1FE] = recursive_entry;
// Map the kernel entry.
// This should contain the heap at 0xFFFFFFFF'40000000
uint64_t kernel_addr = 0xFFFFFFFF'80000000;
pml4_virtual[Pml4Index(kernel_addr)] = *Pml4Entry(kernel_addr);
// Map the HHDM.
// This is necessary to access values off of the kernel stack.
uint64_t hhdm = boot::GetHigherHalfDirectMap();
pml4_virtual[Pml4Index(hhdm)] = *Pml4Entry(hhdm);
}
// Backs the page containing |addr| with a newly allocated physical frame.
void AllocatePage(uint64_t addr) {
  MapPage(addr, phys_mem::AllocatePage());
}
// Ensures every page in the half-open range [addr, addr + size) is mapped,
// allocating fresh physical frames for any that are not.
void EnsureResident(uint64_t addr, uint64_t size) {
  const uint64_t end = addr + size;
  for (uint64_t page = PageAlign(addr); page < end; page += 0x1000) {
    if (!PageLoaded(page)) {
      AllocatePage(page);
    }
  }
}
// True if |addr| is backed by a present leaf page-table entry. Parent
// levels are checked first so the recursive-window read is safe.
bool PageLoaded(uint64_t addr) {
  if (!PageTableLoaded(addr)) {
    return false;
  }
  return (*PageTableEntry(addr) & PRESENT_BIT) != 0;
}