// genusOS/src/map.c

#include "alloc.h"
#include "common.h"
#include "print.h"
// MMIO (Memory-Mapped I/O) Mapping for ACPI and other hardware registers
//
// CRITICAL: MMIO regions are NOT regular RAM!
// - They're not in Limine's memory map as "usable"
// - They're often above the RAM limit
// - Limine does NOT map them automatically
// - You must manually map them before accessing
// One 64-bit x86-64 page-table entry. The same layout is used at every
// level of the 4-level hierarchy (PML4, PDPT, PD, PT); bit assignments
// follow the Intel SDM Vol. 3A, §4.5.
struct page_entry {
  u64 present : 1;        // bit 0: entry is valid
  u64 writable : 1;       // bit 1: writes permitted
  u64 user : 1;           // bit 2: accessible from ring 3
  u64 write_through : 1;  // bit 3 (PWT): write-through caching
  u64 cache_disabled : 1; // bit 4 (PCD): IMPORTANT for MMIO!
  u64 accessed : 1;       // bit 5: set by the CPU on access
  u64 dirty : 1;          // bit 6: set by the CPU on write (leaf entries)
  u64 huge_page : 1;      // bit 7 (PS): 1 GiB/2 MiB page at PDPT/PD level
  u64 global : 1;         // bit 8: TLB entry survives CR3 reload
  u64 unused : 3;         // bits 9-11: available to software
  u64 frame : 40;         // bits 12-51: physical frame number (phys >> 12)
  u64 reserved : 11;      // bits 52-62: reserved / available
  u64 no_execute : 1;     // bit 63 (NX): forbid instruction fetch
};
extern u64 hhdm_offset;
// Read CR3 and strip the low flag/PCID bits, yielding the physical
// address of the currently active PML4 table.
static u64 get_cr3() {
  u64 value;
  asm volatile("mov %%cr3, %0" : "=r"(value));
  return value & ~0xFFF;
}
static u64 alloc_page_table() {
void *a = alloc((1 << 12) / PAGE_SIZE);
if (!a) {
print("Failed to allocate page table\n");
}
return a ? virt_to_phys(a) : 0;
}
// Return a (virtual) pointer to the next-level table behind table[index],
// allocating a fresh table if the entry is not yet present.
// Returns NULL if the allocation fails.
static struct page_entry *get_or_create_table(struct page_entry *table,
                                              u64 index) {
  if (!table[index].present) {
    u64 new_table_phys = alloc_page_table();
    if (new_table_phys == 0) {
      print("failed to alloc\n");
      return NULL;
    }
    // Build the entry from scratch: clear it first so no stale bits
    // (PCD, NX, old frame bits, ...) survive from the slot's previous
    // contents — the original only set 4 fields and left the rest.
    *((u64 *)&table[index]) = 0;
    table[index].present = 1;
    table[index].writable = 1;
    table[index].user = 0;
    table[index].frame = new_table_phys >> 12;
  }
  u64 next_table_phys = (u64)table[index].frame << 12;
  return (struct page_entry *)phys_to_virt(next_table_phys);
}
// Map a physical MMIO region into the HHDM window, uncached.
//
// physical_addr: physical base of the MMIO region (any alignment)
// size:          region size in bytes; rounded up to whole 4 KiB pages
// Returns the virtual address corresponding to physical_addr, or NULL if
// a page-table allocation fails.
//
// The mapping is placed at phys + hhdm_offset with PCD=1 (cache disabled)
// and NX=1, which is what device registers require.
void *map_mmio(u64 physical_addr, u64 size) {
  print("Mapping MMIO region:\n");
  print(" Physical: 0x");
  print64(physical_addr);
  print("\n Size: 0x");
  print64(size);
  print(" (");
  print64(size >> 20);
  print(" MiB)\n");

  // Align the base down to a page boundary; remember the intra-page
  // offset so the caller gets back an address pointing at byte 0 of
  // the requested region, not of the page.
  u64 phys_base = physical_addr & ~(PAGE_SIZE - 1);
  u64 offset_in_page = physical_addr & (PAGE_SIZE - 1);
  u64 total_size = size + offset_in_page;
  u64 num_pages = (total_size + PAGE_SIZE - 1) / PAGE_SIZE;
  print(" Aligned physical: 0x");
  print64(phys_base);
  print("\n Offset in page: 0x");
  print64(offset_in_page);
  print("\n Pages needed: ");
  print64(num_pages);
  print("\n");

  // MMIO is mapped into the HHDM region: virtual = physical + HHDM offset.
  u64 virt_base = phys_base + hhdm_offset;
  print(" Target virtual: 0x");
  print64(virt_base);
  print("\n");

  u64 pml4_phys = get_cr3();
  struct page_entry *pml4 = (struct page_entry *)phys_to_virt(pml4_phys);

  for (u64 i = 0; i < num_pages; i++) {
    u64 current_phys = phys_base + (i * PAGE_SIZE);
    u64 current_virt = virt_base + (i * PAGE_SIZE);
    u64 pml4_index = (current_virt >> 39) & 0x1FF;
    u64 pdpt_index = (current_virt >> 30) & 0x1FF;
    u64 pd_index = (current_virt >> 21) & 0x1FF;
    u64 pt_index = (current_virt >> 12) & 0x1FF;

    struct page_entry *pdpt = get_or_create_table(pml4, pml4_index);
    if (!pdpt) {
      print("ERROR: Failed to get/create PDPT\n");
      return NULL;
    }
    // BUGFIX: the HHDM is often bootloader-mapped with huge pages. A
    // present PS=1 entry is a terminal mapping, not a table pointer —
    // descending through it would corrupt it by treating its frame as a
    // page table. Skip such pages. NOTE(review): the existing huge page's
    // cache attributes are left unchanged — confirm they suit this device.
    if (pdpt[pdpt_index].present && pdpt[pdpt_index].huge_page) {
      print(" WARNING: 1GB huge page already covers this address\n");
      continue;
    }
    struct page_entry *pd = get_or_create_table(pdpt, pdpt_index);
    if (!pd) {
      print("ERROR: Failed to get/create PD\n");
      return NULL;
    }
    if (pd[pd_index].present && pd[pd_index].huge_page) {
      print(" WARNING: 2MB huge page already covers this address\n");
      continue;
    }
    struct page_entry *pt = get_or_create_table(pd, pd_index);
    if (!pt) {
      print("ERROR: Failed to get/create PT\n");
      return NULL;
    }

    // Leaf entry: present, writable, supervisor-only, uncached, NX.
    pt[pt_index].present = 1;
    pt[pt_index].writable = 1;
    pt[pt_index].user = 0;
    pt[pt_index].cache_disabled = 1; // CRITICAL: Disable caching for MMIO!
    pt[pt_index].write_through = 0;
    pt[pt_index].no_execute = 1; // Don't execute from MMIO
    pt[pt_index].frame = current_phys >> 12;
    // Invalidate any stale TLB entry for this virtual page.
    asm volatile("invlpg (%0)" ::"r"(current_virt) : "memory");
  }
  print(" MMIO mapped successfully!\n");
  print(" Access at virtual: 0x");
  print64(virt_base + offset_in_page);
  print("\n\n");
  // Return the virtual address with the original sub-page offset restored.
  return (void *)(virt_base + offset_in_page);
}
// Walk the live page tables and report whether virt_addr is mapped,
// printing the first missing level. Returns 1 if mapped, 0 otherwise.
//
// Fixes vs. the original: reuses the file-scope `struct page_entry`
// instead of redeclaring an identical local copy, uses get_cr3() and
// phys_to_virt() for consistency with the rest of the file, and stops
// at huge pages instead of dereferencing their frame as a table.
int test_if_mapped(u64 virt_addr) {
  u64 pml4_idx = (virt_addr >> 39) & 0x1FF;
  u64 pdpt_idx = (virt_addr >> 30) & 0x1FF;
  u64 pd_idx = (virt_addr >> 21) & 0x1FF;
  u64 pt_idx = (virt_addr >> 12) & 0x1FF;

  struct page_entry *pml4 = (struct page_entry *)phys_to_virt(get_cr3());
  if (!pml4[pml4_idx].present) {
    print(" PML4[");
    print64(pml4_idx);
    print("] NOT PRESENT\n");
    return 0;
  }
  struct page_entry *pdpt =
      (struct page_entry *)phys_to_virt((u64)pml4[pml4_idx].frame << 12);
  if (!pdpt[pdpt_idx].present) {
    print(" PDPT[");
    print64(pdpt_idx);
    print("] NOT PRESENT\n");
    return 0;
  }
  // A PS=1 entry terminates the walk — there is no PD/PT below it.
  if (pdpt[pdpt_idx].huge_page) {
    print(" 1GB huge page, mapped\n");
    return 1;
  }
  struct page_entry *pd =
      (struct page_entry *)phys_to_virt((u64)pdpt[pdpt_idx].frame << 12);
  if (!pd[pd_idx].present) {
    print(" PD[");
    print64(pd_idx);
    print("] NOT PRESENT\n");
    return 0;
  }
  if (pd[pd_idx].huge_page) {
    print(" 2MB huge page, mapped\n");
    return 1;
  }
  struct page_entry *pt =
      (struct page_entry *)phys_to_virt((u64)pd[pd_idx].frame << 12);
  if (!pt[pt_idx].present) {
    print(" PT[");
    print64(pt_idx);
    print("] NOT PRESENT\n");
    return 0;
  }
  print(" All levels present\n");
  print(" Physical page: 0x");
  print64((u64)pt[pt_idx].frame << 12);
  print("\n");
  print(" Cache disabled: ");
  print8(pt[pt_idx].cache_disabled);
  print("\n");
  return 1;
}
// Unmap a region previously mapped with map_mmio and flush the TLB for
// each page. virtual_addr may carry a sub-page offset; size is in bytes.
// Unmapped pages (or whole missing levels) are silently skipped, as are
// huge pages — a present PS=1 entry is a terminal mapping that this
// function must not descend into (the original dereferenced its frame
// as if it were a page table). Also uses PAGE_SIZE instead of the
// hard-coded 4096/0xFFF for consistency with map_mmio.
void unmap_mmio(void *virtual_addr, u64 size) {
  u64 virt_base = (u64)virtual_addr & ~(PAGE_SIZE - 1);
  u64 offset_in_page = (u64)virtual_addr & (PAGE_SIZE - 1);
  u64 num_pages = (size + offset_in_page + PAGE_SIZE - 1) / PAGE_SIZE;
  u64 pml4_phys = get_cr3();
  struct page_entry *pml4 = (struct page_entry *)phys_to_virt(pml4_phys);
  for (u64 i = 0; i < num_pages; i++) {
    u64 current_virt = virt_base + (i * PAGE_SIZE);
    u64 pml4_index = (current_virt >> 39) & 0x1FF;
    u64 pdpt_index = (current_virt >> 30) & 0x1FF;
    u64 pd_index = (current_virt >> 21) & 0x1FF;
    u64 pt_index = (current_virt >> 12) & 0x1FF;
    if (!pml4[pml4_index].present)
      continue;
    struct page_entry *pdpt = phys_to_virt((u64)pml4[pml4_index].frame << 12);
    if (!pdpt[pdpt_index].present || pdpt[pdpt_index].huge_page)
      continue; // absent, or a 1 GiB page we did not create
    struct page_entry *pd = phys_to_virt((u64)pdpt[pdpt_index].frame << 12);
    if (!pd[pd_index].present || pd[pd_index].huge_page)
      continue; // absent, or a 2 MiB page we did not create
    struct page_entry *pt = phys_to_virt((u64)pd[pd_index].frame << 12);
    if (!pt[pt_index].present)
      continue;
    // Clear the whole 64-bit entry, then invalidate the stale TLB entry.
    *((u64 *)&pt[pt_index]) = 0;
    asm volatile("invlpg (%0)" ::"r"(current_virt) : "memory");
  }
}
// Is the HHDM virtual alias of physical_addr currently mapped?
// Walks PML4 -> PDPT -> PD -> PT, treating a present PS=1 entry at the
// PDPT (1 GiB) or PD (2 MiB) level as a terminal mapping.
// Returns 1 when mapped, 0 when any required level is absent.
int is_mapped(u64 physical_addr) {
  u64 va = physical_addr + hhdm_offset;

  struct page_entry *l4 = phys_to_virt(get_cr3());
  struct page_entry *e4 = &l4[(va >> 39) & 0x1FF];
  if (!e4->present)
    return 0;

  struct page_entry *l3 = phys_to_virt((u64)e4->frame << 12);
  struct page_entry *e3 = &l3[(va >> 30) & 0x1FF];
  if (!e3->present)
    return 0;
  if (e3->huge_page)
    return 1; // 1 GiB page

  struct page_entry *l2 = phys_to_virt((u64)e3->frame << 12);
  struct page_entry *e2 = &l2[(va >> 21) & 0x1FF];
  if (!e2->present)
    return 0;
  if (e2->huge_page)
    return 1; // 2 MiB page

  struct page_entry *l1 = phys_to_virt((u64)e2->frame << 12);
  return l1[(va >> 12) & 0x1FF].present;
}
// --- MMIO register accessors -------------------------------------------
// Every access goes through a volatile pointer so the compiler emits
// exactly one load or store of the requested width per call, which is
// required for memory-mapped device registers.

// Read an 8-bit register at mmio_base + offset.
u8 acpi_read8(void *mmio_base, u64 offset) {
  return *(volatile u8 *)((char *)mmio_base + offset);
}

// Read a 16-bit register at mmio_base + offset.
u16 acpi_read16(void *mmio_base, u64 offset) {
  return *(volatile u16 *)((char *)mmio_base + offset);
}

// Read a 32-bit register at mmio_base + offset.
u32 acpi_read32(void *mmio_base, u64 offset) {
  return *(volatile u32 *)((char *)mmio_base + offset);
}

// Read a 64-bit register at mmio_base + offset.
u64 acpi_read64(void *mmio_base, u64 offset) {
  return *(volatile u64 *)((char *)mmio_base + offset);
}

// Write an 8-bit register at mmio_base + offset.
void acpi_write8(void *mmio_base, u64 offset, u8 value) {
  *(volatile u8 *)((char *)mmio_base + offset) = value;
}

// Write a 16-bit register at mmio_base + offset.
void acpi_write16(void *mmio_base, u64 offset, u16 value) {
  *(volatile u16 *)((char *)mmio_base + offset) = value;
}

// Write a 32-bit register at mmio_base + offset.
void acpi_write32(void *mmio_base, u64 offset, u32 value) {
  *(volatile u32 *)((char *)mmio_base + offset) = value;
}

// Write a 64-bit register at mmio_base + offset.
void acpi_write64(void *mmio_base, u64 offset, u64 value) {
  *(volatile u64 *)((char *)mmio_base + offset) = value;
}
// Example: map the ACPI PM1a Control Block. The register block itself is
// only a couple of bytes, but map_mmio always maps whole 4 KiB pages.
// Returns the virtual address, or NULL if mapping failed.
void *map_acpi_pm1a_control(u64 physical_addr) {
  print("Mapping ACPI PM1a Control Block...\n");
  void *mapped = map_mmio(physical_addr, 4096);
  if (!mapped)
    return NULL;
  print("PM1a Control Block mapped at virtual: 0x");
  print64((u64)mapped);
  print("\n");
  return mapped;
}
// Example: map an ACPI table (FADT, MADT, ...) of `size` bytes at
// `physical_addr` through map_mmio, logging the request and the result.
// Returns the virtual address, or NULL if mapping failed.
void *map_acpi_table(u64 physical_addr, u64 size) {
  print("Mapping ACPI table...\n");
  print(" Physical: 0x");
  print64(physical_addr);
  print("\n Size: ");
  print64(size);
  print(" bytes\n");
  void *mapped = map_mmio(physical_addr, size);
  if (!mapped)
    return NULL;
  print("ACPI table mapped at virtual: 0x");
  print64((u64)mapped);
  print("\n");
  return mapped;
}
// Debug helper: dump the full 4-level translation of virtual_addr —
// the per-level indices, each level's presence and frame, and for a
// 4 KiB leaf its writable / cache-disabled / no-execute flags.
// Stops (and says so) at the first non-present level or at a huge page.
void print_page_mapping(u64 virtual_addr) {
  print("Page mapping for virtual 0x");
  print64(virtual_addr);
  print(":\n");
  u64 pml4_phys = get_cr3();
  struct page_entry *pml4 = phys_to_virt(pml4_phys);
  // Each level consumes 9 bits of the virtual address (512 entries).
  u64 pml4_index = (virtual_addr >> 39) & 0x1FF;
  u64 pdpt_index = (virtual_addr >> 30) & 0x1FF;
  u64 pd_index = (virtual_addr >> 21) & 0x1FF;
  u64 pt_index = (virtual_addr >> 12) & 0x1FF;
  print(" Indices: PML4[");
  print64(pml4_index);
  print("] PDPT[");
  print64(pdpt_index);
  print("] PD[");
  print64(pd_index);
  print("] PT[");
  print64(pt_index);
  print("]\n");
  if (!pml4[pml4_index].present) {
    print(" PML4 entry not present!\n");
    return;
  }
  print(" PML4 entry present, frame=0x");
  print64((u64)pml4[pml4_index].frame << 12);
  print("\n");
  struct page_entry *pdpt = phys_to_virt((u64)pml4[pml4_index].frame << 12);
  if (!pdpt[pdpt_index].present) {
    print(" PDPT entry not present!\n");
    return;
  }
  print(" PDPT entry present, frame=0x");
  print64((u64)pdpt[pdpt_index].frame << 12);
  print("\n");
  // PS=1 at the PDPT level means the translation ends here (1 GiB page).
  if (pdpt[pdpt_index].huge_page) {
    print(" 1GB huge page!\n");
    return;
  }
  struct page_entry *pd = phys_to_virt((u64)pdpt[pdpt_index].frame << 12);
  if (!pd[pd_index].present) {
    print(" PD entry not present!\n");
    return;
  }
  print(" PD entry present, frame=0x");
  print64((u64)pd[pd_index].frame << 12);
  print("\n");
  // PS=1 at the PD level means the translation ends here (2 MiB page).
  if (pd[pd_index].huge_page) {
    print(" 2MB huge page!\n");
    return;
  }
  struct page_entry *pt = phys_to_virt((u64)pd[pd_index].frame << 12);
  if (!pt[pt_index].present) {
    print(" PT entry not present!\n");
    return;
  }
  // 4 KiB leaf: show the frame and the flags that matter for MMIO use.
  print(" PT entry present:\n");
  print(" Physical frame: 0x");
  print64((u64)pt[pt_index].frame << 12);
  print("\n Writable: ");
  print64(pt[pt_index].writable);
  print("\n Cache disabled: ");
  print64(pt[pt_index].cache_disabled);
  print("\n No execute: ");
  print64(pt[pt_index].no_execute);
  print("\n");
}