[Zion] Move to StrFormat for debug line.
parent 4af19d010f
commit 69aced2220

23 changed files with 142 additions and 89 deletions
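The hunks below move the kernel's dbgln and panic format strings from printf-style specifiers (%x, %u, %m) to brace placeholders, where {} formats an argument with defaults and {x} renders it as hexadecimal. As a rough illustration of that placeholder convention only, here is a minimal host-side sketch of a brace formatter; it is not Zion's actual StrFormat or dbgln implementation, and the names used here (StrFormatSketch, FormatTo, AppendArg) are invented for this example.

// Illustrative sketch, not Zion's StrFormat: "{}" uses default formatting,
// "{x}" renders the argument in hexadecimal.
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

// Append one argument, honoring an optional "x" spec for hexadecimal output.
template <typename T>
void AppendArg(std::ostringstream& out, const std::string& spec, const T& arg) {
  if (spec == "x") {
    out << std::hex << arg << std::dec;  // "{x}"
  } else {
    out << arg;                          // "{}"
  }
}

// Base case: no arguments left; copy the rest of the format string.
inline void FormatTo(std::ostringstream& out, const char* fmt) { out << fmt; }

// Recursive case: copy literal text up to the next "{spec}", emit one argument.
template <typename T, typename... Rest>
void FormatTo(std::ostringstream& out, const char* fmt, const T& arg,
              const Rest&... rest) {
  while (*fmt != '\0') {
    if (*fmt == '{') {
      std::string spec;
      for (++fmt; *fmt != '\0' && *fmt != '}'; ++fmt) spec.push_back(*fmt);
      if (*fmt == '}') ++fmt;
      AppendArg(out, spec, arg);
      FormatTo(out, fmt, rest...);
      return;
    }
    out << *fmt++;
  }
}

template <typename... Args>
std::string StrFormatSketch(const char* fmt, const Args&... args) {
  std::ostringstream out;
  FormatTo(out, fmt, args...);
  return out.str();
}

int main() {
  uint64_t size = 0x40;
  std::cout << StrFormatSketch("Alloc ({x})", size) << "\n";       // Alloc (40)
  std::cout << StrFormatSketch("<=64B: {}", uint64_t{7}) << "\n";  // <=64B: 7
  return 0;
}

Measured against the call sites in this commit, the mapping is: %x and %m both become {x}, while %u becomes the plain {} placeholder.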
@@ -36,32 +36,32 @@ void KernelHeap::InitializeSlabAllocators() {

 void* KernelHeap::Allocate(uint64_t size) {
 #if K_HEAP_DEBUG
-  dbgln("Alloc (%x)", size);
+  dbgln("Alloc ({x})", size);
 #endif
   if ((size <= 8) && slab_8_) {
     auto ptr_or = slab_8_->Allocate();
     if (ptr_or.ok()) {
       return ptr_or.value();
     }
-    dbgln("Failed allocation (slab 8): %x", ptr_or.error());
+    dbgln("Failed allocation (slab 8): {x}", ptr_or.error());
   }
   if ((size <= 16) && slab_16_) {
     auto ptr_or = slab_16_->Allocate();
     if (ptr_or.ok()) {
       return ptr_or.value();
     }
-    dbgln("Failed allocation (slab 16): %x", ptr_or.error());
+    dbgln("Failed allocation (slab 16): {x}", ptr_or.error());
   }
   if ((size <= 32) && slab_32_) {
     auto ptr_or = slab_32_->Allocate();
     if (ptr_or.ok()) {
       return ptr_or.value();
     }
-    dbgln("Failed allocation (slab 32): %x", ptr_or.error());
+    dbgln("Failed allocation (slab 32): {x}", ptr_or.error());
   }
   if (next_addr_ + size >= upper_bound_) {
-    panic("Kernel Heap Overrun (next, size, max): %m, %x, %m", next_addr_, size,
-          upper_bound_);
+    panic("Kernel Heap Overrun (next, size, max): {x}, {x}, {x}", next_addr_,
+          size, upper_bound_);
   }
 #if K_HEAP_DEBUG
   RecordSize(size);
@@ -75,18 +75,18 @@ void* KernelHeap::Allocate(uint64_t size) {
 void KernelHeap::DumpDistribution() {
 #if K_HEAP_DEBUG
   uint64_t* distributions = gKernelHeap->distributions;
-  dbgln("<=4B: %u", distributions[0]);
-  dbgln("<=8B: %u", distributions[1]);
-  dbgln("<=16B: %u", distributions[2]);
-  dbgln("<=32B: %u", distributions[3]);
-  dbgln("<=64B: %u", distributions[4]);
-  dbgln("<=128B: %u", distributions[5]);
-  dbgln("<=256B: %u", distributions[6]);
-  dbgln("<=512B: %u", distributions[7]);
-  dbgln("<=1KiB: %u", distributions[8]);
-  dbgln("<=2KiB: %u", distributions[9]);
-  dbgln("<=4KiB: %u", distributions[10]);
-  dbgln("> 4KiB: %u", distributions[11]);
+  dbgln("<=4B: {}", distributions[0]);
+  dbgln("<=8B: {}", distributions[1]);
+  dbgln("<=16B: {}", distributions[2]);
+  dbgln("<=32B: {}", distributions[3]);
+  dbgln("<=64B: {}", distributions[4]);
+  dbgln("<=128B: {}", distributions[5]);
+  dbgln("<=256B: {}", distributions[6]);
+  dbgln("<=512B: {}", distributions[7]);
+  dbgln("<=1KiB: {}", distributions[8]);
+  dbgln("<=2KiB: {}", distributions[9]);
+  dbgln("<=4KiB: {}", distributions[10]);
+  dbgln("> 4KiB: {}", distributions[11]);
 #endif
 }
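DumpDistribution above prints a 12-bucket histogram of allocation sizes, from <=4B up to >4KiB. The bucketing is done by RecordSize, which is not part of this diff, so the following is only a hypothetical sketch of how a size could map onto those 12 buckets, consistent with the printed labels rather than taken from the kernel.

// Hypothetical bucketing consistent with the 12 labels printed above;
// not the kernel's actual RecordSize logic.
#include <cstdint>

uint64_t BucketIndex(uint64_t size) {
  uint64_t bound = 4;  // bucket 0 covers sizes <= 4 bytes
  for (uint64_t index = 0; index < 11; index++) {
    if (size <= bound) {
      return index;
    }
    bound *= 2;  // 8, 16, 32, ..., 4096
  }
  return 11;  // bucket 11: anything larger than 4 KiB
}

Under this scheme BucketIndex(64) is 4, matching the "<=64B" line, and any request above 4 KiB lands in the final "> 4KiB" bucket.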
@@ -30,5 +30,5 @@ uint64_t* KernelStackManager::AllocateKernelStack() {

 void KernelStackManager::FreeKernelStack(uint64_t stack_base) {
   freed_stack_cnt_++;
-  dbgln("Freed kernel stacks using %u KiB", freed_stack_cnt_ * 12);
+  dbgln("Freed kernel stacks using {} KiB", freed_stack_cnt_ * 12);
 }
@@ -37,7 +37,7 @@ class PhysicalMemoryManager {
     for (uint64_t i = 0; i < memmap.entry_count; i++) {
       const limine_memmap_entry& entry = *memmap.entries[i];
 #if K_PHYS_DEBUG
-      dbgln("Region(%u) at %m:%x", entry.type, entry.base, entry.length);
+      dbgln("Region({}) at {x}:{x}", entry.type, entry.base, entry.length);
 #endif
       if (entry.type == 0) {
         uint64_t base = entry.base;
@@ -46,7 +46,7 @@ class PhysicalMemoryManager {
         base = gBootstrap.next_page;
         uint64_t bootstrap_used = gBootstrap.next_page - gBootstrap.init_page;
 #if K_PHYS_DEBUG
-        dbgln("[PMM] Taking over from bootstrap, used: %x", bootstrap_used);
+        dbgln("[PMM] Taking over from bootstrap, used: {x}", bootstrap_used);
 #endif
         size -= bootstrap_used;
       }
@@ -73,7 +73,7 @@ class PhysicalMemoryManager {
       delete temp;
     }
 #if K_PHYS_DEBUG
-    dbgln("Single %m", page);
+    dbgln("Single {x}", page);
 #endif
     return page;
   }
@@ -88,7 +88,7 @@ class PhysicalMemoryManager {

     MemBlock* block = front_;
     while (block != nullptr && block->num_pages < num_pages) {
-      dbgln("Skipping block of size %u seeking %u", block->num_pages,
+      dbgln("Skipping block of size {} seeking {}", block->num_pages,
             num_pages);
       block = block->next;
     }
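The loop in the hunk above walks a linked list of free MemBlock entries until one has enough pages, logging each block it skips. MemBlock's definition is not shown in this commit, so the sketch below only assumes a shape for that first-fit walk; the field names mirror the ones visible in the diff (num_pages, next, front_).

// Assumed shape of the first-fit search over the PMM's free-block list;
// MemBlock here is a stand-in, not the kernel's real definition.
#include <cstdint>

struct MemBlock {
  uint64_t base = 0;       // first physical page of the block (assumed field)
  uint64_t num_pages = 0;  // pages available in this block
  MemBlock* next = nullptr;
};

MemBlock* FindFirstFit(MemBlock* front, uint64_t num_pages) {
  MemBlock* block = front;
  while (block != nullptr && block->num_pages < num_pages) {
    block = block->next;  // too small, keep scanning
  }
  return block;  // nullptr if no block can satisfy the request
}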
@@ -106,7 +106,7 @@ class PhysicalMemoryManager {
       delete temp;
     }
 #if K_PHYS_DEBUG
-    dbgln("Continuous %m:%u", page, num_pages);
+    dbgln("Continuous {x}:{}", page, num_pages);
 #endif
     return page;
   }
@@ -19,7 +19,7 @@ uint64_t UserStackManager::NewUserStack() {

 void UserStackManager::FreeUserStack(uint64_t stack_ptr) {
   freed_stacks_++;
-  dbgln("%u freed user stacks", freed_stacks_);
+  dbgln("{} freed user stacks", freed_stacks_);
 }

 bool UserStackManager::IsValidStack(uint64_t vaddr) {