Squash commits for public release

2025-02-12 09:54:05 -05:00
commit 7118adc514
1108 changed files with 80873 additions and 0 deletions

@@ -0,0 +1,61 @@
#ifndef _BOOT_DRIVERS_PORT_H
#define _BOOT_DRIVERS_PORT_H
#include <libboot/types.h>
static inline uint8_t port_read8(uint16_t port)
{
uint8_t result_data;
asm volatile("inb %%dx, %%al"
: "=a"(result_data)
: "d"(port));
return result_data;
}
static inline void port_write8(uint16_t port, uint8_t data)
{
asm volatile("outb %%al, %%dx"
:
: "a"(data), "d"(port));
}
static inline uint16_t port_read16(uint16_t port)
{
uint16_t result_data;
asm volatile("inw %%dx, %%ax"
: "=a"(result_data)
: "d"(port));
return result_data;
}
static inline void port_write16(uint16_t port, uint16_t data)
{
asm volatile("outw %%ax, %%dx"
:
: "a"(data), "d"(port));
}
static inline uint32_t port_read32(uint16_t port)
{
uint32_t result_data;
asm volatile("inl %%dx, %%eax"
: "=a"(result_data)
: "d"(port));
return result_data;
}
static inline void port_write32(uint16_t port, uint32_t data)
{
asm volatile("outl %%eax, %%dx"
:
: "a"(data), "d"(port));
}
// Write to unused port 0x80 to give slow devices a short I/O delay.
static inline void port_wait_io(void)
{
asm volatile("out %%al, $0x80"
:
: "a"(0));
}
#endif // _BOOT_DRIVERS_PORT_H
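
The helpers above cover all three port widths used in the tree. As an illustration only (not part of this commit), the 32-bit pair can be exercised with the legacy PCI configuration mechanism, which writes an address to port 0xCF8 and reads the data back from 0xCFC; pci_config_read32 below is a hypothetical helper, not an existing libboot function.
// Hypothetical sketch: read a 32-bit PCI config register with the helpers above.
static inline uint32_t pci_config_read32(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset)
{
uint32_t addr = (1u << 31) // Enable bit.
| ((uint32_t)bus << 16)
| ((uint32_t)dev << 11)
| ((uint32_t)func << 8)
| (offset & 0xFC); // Dword-aligned register offset.
port_write32(0xCF8, addr); // CONFIG_ADDRESS
return port_read32(0xCFC); // CONFIG_DATA
}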

@@ -0,0 +1,43 @@
#include "uart.h"
#include "port.h"
static int _uart_setup_impl(int port)
{
port_write8(port + 1, 0x00); // Disable UART interrupts.
port_write8(port + 3, 0x80); // Set DLAB to program the baud divisor.
port_write8(port + 0, 0x03); // Divisor low byte: 3 (115200 / 3 = 38400 baud).
port_write8(port + 1, 0x00); // Divisor high byte.
port_write8(port + 3, 0x03); // 8 data bits, no parity, 1 stop bit; DLAB cleared.
port_write8(port + 2, 0xC7); // Enable and clear FIFOs, 14-byte threshold.
port_write8(port + 4, 0x0B); // DTR and RTS asserted, OUT2 set.
return 0;
}
void uart_init()
{
_uart_setup_impl(COM1);
}
static inline bool _uart_is_free_in(int port)
{
return port_read8(port + 5) & 0x01; // LSR bit 0: received data ready.
}
static inline bool _uart_is_free_out(int port)
{
return port_read8(port + 5) & 0x20; // LSR bit 5: transmit holding register empty.
}
int uart_write(uint8_t data)
{
while (!_uart_is_free_out(COM1)) { }
port_write8(COM1, data);
return 0;
}
int uart_read(uint8_t* data)
{
while (!_uart_is_free_in(COM1)) { }
*data = port_read8(COM1);
return 0;
}
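
main() below registers uart_write as the byte sink for the boot logger (log_init(uart_write)). A minimal string writer on top of it could look like the following sketch; uart_puts is hypothetical and not part of this commit.
// Hypothetical sketch: write a NUL-terminated string over COM1,
// expanding '\n' to "\r\n" for serial terminals.
static void uart_puts(const char* s)
{
while (*s) {
if (*s == '\n') {
uart_write('\r');
}
uart_write((uint8_t)*s++);
}
}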

@@ -0,0 +1,15 @@
#ifndef _BOOT_DRIVERS_UART_H
#define _BOOT_DRIVERS_UART_H
#include <libboot/types.h>
#define COM1 0x3F8
#define COM2 0x2F8
#define COM3 0x3E8
#define COM4 0x2E8
void uart_init();
int uart_write(uint8_t data);
int uart_read(uint8_t* data);
#endif // _BOOT_DRIVERS_UART_H

@@ -0,0 +1,153 @@
#include "drivers/uart.h"
#include "vm.h"
#include <libboot/abi/multiboot.h>
#include <libboot/elf/elf_lite.h>
#include <libboot/log/log.h>
#include <libboot/mem/alloc.h>
#include <libboot/types.h>
// #define DEBUG_BOOT
extern void jump_to_kernel(void*, uintptr_t);
static void* bootdesc_paddr;
static void* bootdesc_vaddr;
static size_t kernel_vaddr = 0;
static size_t kernel_paddr = 0;
static size_t kernel_size = 0;
#define LAUNCH_SERVER_PATH "/System/launch_server"
static int alloc_init(uintptr_t base, multiboot_info_t* multiboot)
{
uintptr_t region_base = 0x0;
size_t region_size = 0x0;
multiboot_memory_map_t* memmap = (multiboot_memory_map_t*)(uint64_t)multiboot->mmap_addr;
size_t n = multiboot->mmap_length / sizeof(multiboot_memory_map_t);
// Find the available memory region that contains the address we are loaded at.
for (size_t i = 0; i < n; i++) {
if (memmap[i].type == MULTIBOOT_MEMORY_AVAILABLE) {
if (memmap[i].addr <= base && base < memmap[i].addr + memmap[i].len) {
region_base = memmap[i].addr;
region_size = memmap[i].len;
}
}
}
// The region we are loaded in is expected to be large, at least 128MB.
if (region_size < (128 << 20)) {
log("Memory region is smaller than the required 128MB");
while (1) { }
}
extern uint32_t RAWIMAGE_END[];
uintptr_t start_addr = ROUND_CEIL((uint64_t)RAWIMAGE_END, page_size());
size_t free_space = region_size - (start_addr - region_base);
malloc_init((void*)start_addr, free_space);
#ifdef DEBUG_BOOT
log("malloc inited %llx %llx", start_addr, free_space);
#endif
return 0;
}
static size_t memory_layout_size(multiboot_info_t* multiboot)
{
size_t n = multiboot->mmap_length / sizeof(multiboot_memory_map_t);
return n + 1; // Plus one for the terminating sentinel entry.
}
static int preserve_alloc_init(size_t kernsize, multiboot_info_t* multiboot)
{
const size_t memmap = ROUND_CEIL(memory_layout_size(multiboot) * sizeof(memory_layout_t), page_size());
const size_t bootargsstruct = ROUND_CEIL(sizeof(boot_args_t), page_size());
const size_t bootargssize = memmap + bootargsstruct;
// 32 tables should be enough for initial mappings.
const size_t prekernelvmsize = 32 * page_size();
const size_t total = ROUND_CEIL(kernsize + bootargssize + prekernelvmsize, page_size());
return palloc_init(total, 2 << 20);
}
static memory_boot_desc_t memory_boot_desc_init(multiboot_info_t* multiboot)
{
size_t ram_last_addr = 0x0;
size_t next_id = 0;
memory_layout_t* mem_layout_paddr = palloc_aligned(memory_layout_size(multiboot) * sizeof(memory_layout_t), page_size());
multiboot_memory_map_t* memmap = (multiboot_memory_map_t*)(uint64_t)multiboot->mmap_addr;
size_t n = multiboot->mmap_length / sizeof(multiboot_memory_map_t);
for (size_t i = 0; i < n; i++) {
if (memmap[i].type != MULTIBOOT_MEMORY_AVAILABLE) {
// Not available RAM: record it as a reserved area so the kernel can exclude it.
mem_layout_paddr[next_id].base = memmap[i].addr;
mem_layout_paddr[next_id].size = memmap[i].len;
mem_layout_paddr[next_id].flags = 0;
next_id++;
} else {
ram_last_addr = max(ram_last_addr, memmap[i].addr + memmap[i].len);
}
}
mem_layout_paddr[next_id].flags = MEMORY_LAYOUT_FLAG_TERMINATE;
memory_layout_t* mem_layout_vaddr = paddr_to_vaddr(mem_layout_paddr, kernel_paddr, kernel_vaddr);
memory_boot_desc_t res;
res.ram_base = 0x0;
res.ram_size = ram_last_addr;
res.reserved_areas = mem_layout_vaddr;
return res;
}
static void load_kernel(void* kernelstart, multiboot_info_t* multiboot)
{
kernel_size = elf_get_kernel_size(kernelstart);
kernel_size = ROUND_CEIL(kernel_size, page_size());
int err = preserve_alloc_init(kernel_size, multiboot);
if (err) {
log("add assert");
while (1) { }
}
int res = elf_load_kernel(kernelstart, kernel_size, &kernel_vaddr, &kernel_paddr);
#ifdef DEBUG_BOOT
log("kernel %lx %lx %lx", kernel_vaddr, kernel_paddr, kernel_size);
#endif
boot_args_t boot_args;
boot_args.vaddr = kernel_vaddr;
boot_args.paddr = kernel_paddr;
boot_args.kernel_data_size = 0x0; // Set later, once the total boot allocation size is known.
boot_args.mem_boot_desc = memory_boot_desc_init(multiboot);
boot_args.devtree = NULL;
boot_args.fb_boot_desc.vaddr = 0; // Marking fb as invalid.
memcpy(boot_args.init_process, LAUNCH_SERVER_PATH, sizeof(LAUNCH_SERVER_PATH));
bootdesc_paddr = palloc_aligned(sizeof(boot_args), page_size());
memcpy(bootdesc_paddr, &boot_args, sizeof(boot_args));
bootdesc_vaddr = paddr_to_vaddr(bootdesc_paddr, kernel_paddr, kernel_vaddr);
#ifdef DEBUG_BOOT
log("copying BOOTDESC %lx -> %lx of %d", &boot_args, bootdesc_vaddr, sizeof(boot_args));
#endif
}
int main(uint64_t base, multiboot_info_t* multiboot)
{
uart_init();
log_init(uart_write);
alloc_init(base, multiboot);
extern uint32_t EMBED_KERNEL_START[];
load_kernel((void*)EMBED_KERNEL_START, multiboot);
vm_setup(base, bootdesc_paddr);
#ifdef DEBUG_BOOT
log("Preboot done: booting to OS@%llx", ((boot_args_t*)bootdesc_vaddr)->vaddr);
#endif
((boot_args_t*)bootdesc_vaddr)->kernel_data_size = ROUND_CEIL(palloc_used_size(), page_size());
jump_to_kernel((void*)bootdesc_vaddr, ((boot_args_t*)bootdesc_vaddr)->vaddr);
return 0;
}
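
load_kernel relies on two libboot helpers whose definitions are not part of this diff. Assuming the usual conventions, they behave like the sketch below: ROUND_CEIL rounds a size up to a multiple of the page size, and paddr_to_vaddr shifts a boot-time physical pointer into the kernel's virtual window by the same linear offset reported via kernel_paddr/kernel_vaddr. Both are assumptions for illustration, not the library's actual code.
// Assumed semantics of the libboot helpers used above (illustrative only).
#define ROUND_CEIL_SKETCH(x, align) ((((x) + (align) - 1) / (align)) * (align))
static inline void* paddr_to_vaddr_sketch(void* paddr, size_t kern_paddr, size_t kern_vaddr)
{
// The kernel is loaded at kern_paddr but mapped at kern_vaddr, so boot
// structures handed over to it get the same linear shift applied.
return (void*)((uintptr_t)paddr - kern_paddr + kern_vaddr);
}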

@@ -0,0 +1,299 @@
MBOOT_PAGE_ALIGN equ 1<<0
MBOOT_MEM_INFO equ 1<<1
MBOOT_HEADER_MAGIC equ 0x1badb002
MBOOT_HEADER_FLAGS equ MBOOT_PAGE_ALIGN | MBOOT_MEM_INFO
MBOOT_CHECKSUM equ -(MBOOT_HEADER_MAGIC + MBOOT_HEADER_FLAGS)
section .multiboot
dd MBOOT_HEADER_MAGIC
dd MBOOT_HEADER_FLAGS
dd MBOOT_CHECKSUM
dd 0x00000000 ; header_addr
dd 0x00000000 ; load_addr
dd 0x00000000 ; load_end_addr
dd 0x00000000 ; bss_end_addr
dd 0x00000000 ; entry_addr
dd 0x00000000 ; mode_type
dd 0x00000000 ; width
dd 0x00000000 ; height
dd 0x00000000 ; depth
[bits 32]
section .xos_boot_text
extern main
extern PREKERNEL_STACK_TOP
global _start
_start:
global prekernel_entry
prekernel_entry:
cli
cld
; Setting up the stack.
mov ebp, PREKERNEL_STACK_TOP
mov esp, ebp
; Save the entry value passed in ecx in a callee-saved register; it is later handed to main().
mov esi, ecx
; PAE is required for x86_64
call check_for_pae
; Checking for long mode.
call check_for_long_mode
; x86_64 requires paging to be enabled, so identity-map the first 1GB of
; physical memory so the kernel can be booted without any problems.
call setup_tables
; Enabling long mode with a 32-bit compatibility submode.
call enable_long_mode
; Hand control to the 64-bit jump stub.
jmp jump_to_entry64
CPUID_FEATURE_PAE equ (1 << 6)
check_for_pae:
push ebp
mov ebp, esp
push ebx
mov eax, 0x1
cpuid
test edx, CPUID_FEATURE_PAE
jz pae_unsupported
pop ebx
pop ebp
ret
pae_unsupported:
push pae_unsupported_msg
call early_boot_print_string
hlt
CPUID_FEATURE_LONG_MODE equ (1 << 29)
check_for_long_mode:
push ebp
mov ebp, esp
push ebx
mov eax, 0x80000001
cpuid
test edx, CPUID_FEATURE_LONG_MODE
jz long_mode_unsupported
pop ebx
pop ebp
ret
long_mode_unsupported:
push long_mode_unsupported_msg
call early_boot_print_string
hlt
setup_tables:
push ebp
mov ebp, esp
push edi
; Zero the three page-table pages.
mov edi, table0
xor eax, eax
mov ecx, 4096
rep stosb
mov edi, table1
xor eax, eax
mov ecx, 4096
rep stosb
mov edi, table2
xor eax, eax
mov ecx, 4096
rep stosb
; table0[0] -> table1, flags 0x3: present | writable.
mov eax, table1
add eax, 0x3
mov DWORD [table0], eax
; table1[0] -> table2, flags 0x3: present | writable.
mov eax, table2
add eax, 0x3
mov DWORD [table1], eax
; Fill table2 with 512 entries of 2MB each (flags 0x83: present | writable |
; page size), identity-mapping the first 1GB of physical memory.
mov edi, table2
mov eax, 0x83
mov ecx, 512
.table_fillup_last:
mov DWORD [edi], eax
add eax, 0x200000
add edi, 8
loop .table_fillup_last
pop edi
pop ebp
ret
enable_long_mode:
push ebp
mov ebp, esp
; Load the top-level page table (table0) into CR3.
mov eax, table0
mov cr3, eax
; Enable PAE (CR4 bit 5), required for long mode.
mov eax, cr4
or eax, 1 << 5
mov cr4, eax
; Set EFER.LME (MSR 0xc0000080, bit 8).
mov ecx, 0xc0000080
rdmsr
or eax, 1 << 8
wrmsr
; Enable paging (CR0 bit 31); together with LME this activates long mode.
mov eax, cr0
or eax, 1 << 31
mov cr0, eax
pop ebp
ret
VIDEO_MEMORY equ 0xb8000
WHITE_ON_BLACK equ 0x0f
global early_boot_print_string
early_boot_print_string:
push ebp
mov ebp, esp
push esi
push ecx
mov esi, [ebp+8]
mov ecx, VIDEO_MEMORY
mov ah, WHITE_ON_BLACK
_print_string_loop:
lodsb ; Load the byte at [esi] into al and advance esi.
test al, al
jz _print_string_end
mov [ecx], ax
add ecx, 2
jmp _print_string_loop
_print_string_end:
mov eax, esi
sub eax, [ebp+8]
pop ecx
pop esi
mov esp, ebp
pop ebp
ret
; Access bits
PRESENT equ 1 << 7
NOT_SYS equ 1 << 4
EXEC equ 1 << 3
DC equ 1 << 2
RW equ 1 << 1
ACCESSED equ 1 << 0
; Flags bits
GRAN_4K equ 1 << 7
SZ_32 equ 1 << 6
LONG_MODE equ 1 << 5
align 32
gdt_begin:
gdt_null:
dq 0x0
gdt_code:
dd 0xffff
db 0x0
db PRESENT | NOT_SYS | EXEC | RW
db GRAN_4K | LONG_MODE | 0xF
db 0x0
gdt_data:
dd 0xffff
db 0x0
db PRESENT | NOT_SYS | RW
db GRAN_4K | SZ_32 | 0xF
db 0x0
gdt_tss:
dd 0x00000068
dd 0x00cf8900
gdt_end:
gdt_descriptor:
dw gdt_end - gdt_begin - 1
dd gdt_begin
dd 0x0
CODE_SEG equ gdt_code - gdt_begin
DATA_SEG equ gdt_data - gdt_begin
jump_to_entry64:
lgdt [gdt_descriptor]
jmp CODE_SEG:main_entry64
[bits 64]
main_entry64:
cli
mov ax, DATA_SEG
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
mov ss, ax
mov edi, esi ; 1st argument of main: the value saved from ecx at entry.
mov esi, ebx ; 2nd argument of main: pointer to the multiboot info structure.
call main
hlt
global set_cr3
set_cr3:
mov cr3, rdi
ret
global jump_to_kernel
jump_to_kernel:
mov rax, cr3 ; Reload CR3 to flush the TLB before entering the kernel.
mov cr3, rax
call rsi ; rdi: boot_args pointer, rsi: kernel entry vaddr.
jmp $
pae_unsupported_msg:
db "Required PAE Feature is unavailable, stopping...", 0
long_mode_unsupported_msg:
db "Required Long Mode Feature is unavailable, stopping...", 0
section .bss
align 4096
table0:
resb 4096
align 4096
table1:
resb 4096
align 4096
table2:
resb 4096
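
In C terms, setup_tables builds a three-level structure (PML4 -> PDPT -> PD) where slot 0 of each upper level points at the next table and the page directory identity-maps the first 1GB with 512 two-megabyte entries (512 * 2MB = 1GB). The sketch below only illustrates that layout and is not code from this commit.
// Illustrative C equivalent of setup_tables (assumes 4KB-aligned, zeroed tables).
// 0x3 = present | writable; 0x83 additionally sets the 2MB page-size bit.
static void setup_tables_sketch(uint64_t* pml4, uint64_t* pdpt, uint64_t* pd)
{
pml4[0] = (uint64_t)(uintptr_t)pdpt | 0x3;
pdpt[0] = (uint64_t)(uintptr_t)pd | 0x3;
for (uint64_t i = 0; i < 512; i++) {
pd[i] = (i * 0x200000ull) | 0x83; // Identity-map the i-th 2MB chunk.
}
}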

boot/x86_64/prekernel/vm.c

@@ -0,0 +1,108 @@
#include "vm.h"
#include <libboot/abi/kernel.h>
#include <libboot/abi/rawimage.h>
#include <libboot/log/log.h>
#include <libboot/mem/alloc.h>
#include <libboot/mem/mem.h>
// #define DEBUG_VM
static uint64_t* global_page_table;
static const uint64_t kernel_base = 0xffff800000000000;
static uint64_t* new_ptable(boot_args_t* args)
{
uint64_t* res = (uint64_t*)palloc_aligned(page_size(), page_size());
memset(res, 0, page_size());
#ifdef DEBUG_VM
log(" alloc ptable %llx %llx", (uint64_t)res, page_size());
#endif
return res;
}
static void map4kb_2mb(boot_args_t* args, size_t phyz, size_t virt)
{
const size_t page_covers = (1ull << PTABLE_LV1_VADDR_OFFSET);
const size_t page_mask = page_covers - 1;
if ((phyz & page_mask) != 0 || (virt & page_mask) != 0) {
return;
}
// Walk the tables from the top level (level 3).
uint64_t* page_table = global_page_table;
uint64_t ptable_desc = page_table[VM_VADDR_OFFSET_AT_LEVEL(virt, PTABLE_LV3_VADDR_OFFSET, VMM_LV3_ENTITY_COUNT)];
if (ptable_desc == 0) {
uint64_t* nptbl = new_ptable(args);
uint64_t pdesc = 0x00000000000003; // Present | writable.
pdesc |= (uintptr_t)nptbl;
page_table[VM_VADDR_OFFSET_AT_LEVEL(virt, PTABLE_LV3_VADDR_OFFSET, VMM_LV3_ENTITY_COUNT)] = pdesc;
ptable_desc = pdesc;
}
// Level 2: extract the next table's physical address (clear flag bits, keep bits 12..47).
page_table = (uint64_t*)(((ptable_desc >> 12) << 12) & 0xffffffffffff);
ptable_desc = page_table[VM_VADDR_OFFSET_AT_LEVEL(virt, PTABLE_LV2_VADDR_OFFSET, VMM_LV2_ENTITY_COUNT)];
if (ptable_desc == 0) {
uint64_t* nptbl = new_ptable(args);
uint64_t pdesc = 0x00000000000003; // Present | writable.
pdesc |= (uintptr_t)nptbl;
page_table[VM_VADDR_OFFSET_AT_LEVEL(virt, PTABLE_LV2_VADDR_OFFSET, VMM_LV2_ENTITY_COUNT)] = pdesc;
ptable_desc = pdesc;
}
// Level 1: install the 2MB page entry.
page_table = (uint64_t*)(((ptable_desc >> 12) << 12) & 0xffffffffffff);
uint64_t pdesc = 0x0000000000083; // Present | writable | page size (2MB).
pdesc |= (uintptr_t)phyz;
page_table[VM_VADDR_OFFSET_AT_LEVEL(virt, PTABLE_LV1_VADDR_OFFSET, VMM_LV1_ENTITY_COUNT)] = pdesc;
}
// 1GB huge pages are a separate CPU feature that may be unavailable, so build the 1GB range out of 2MB pages.
static void map4kb_1gb(boot_args_t* args, size_t phyz, size_t virt)
{
const size_t page_covers = (1ull << PTABLE_LV2_VADDR_OFFSET);
const size_t page_mask = page_covers - 1;
if ((phyz & page_mask) != 0 || (virt & page_mask) != 0) {
return;
}
for (int i = 0; i < 512; i++) {
map4kb_2mb(args, phyz, virt);
phyz += (2 << 20);
virt += (2 << 20);
}
}
void vm_setup(uintptr_t base, boot_args_t* args)
{
global_page_table = (uint64_t*)palloc_aligned(page_size(), page_size());
memset(global_page_table, 0, page_size());
const size_t map_range_2mb = (2 << 20);
const size_t map_range_1gb = (1 << 30);
// Mapping kernel vaddr to paddr
size_t kernel_size_to_map = palloc_total_size() + shadow_area_size();
size_t kernel_range_count_to_map = (kernel_size_to_map + (map_range_2mb - 1)) / map_range_2mb;
for (size_t i = 0; i < kernel_range_count_to_map; i++) {
#ifdef DEBUG_VM
log("mapping %lx %lx", args->paddr + i * map_range_2mb, args->vaddr + i * map_range_2mb);
#endif
map4kb_2mb(args, args->paddr + i * map_range_2mb, args->vaddr + i * map_range_2mb);
}
// Mapping RAM
size_t ram_base = ROUND_FLOOR(args->mem_boot_desc.ram_base, map_range_1gb);
size_t ram_size = args->mem_boot_desc.ram_size;
size_t ram_range_count_to_map = (ram_size + (map_range_1gb - 1)) / map_range_1gb;
for (size_t i = 0; i < ram_range_count_to_map; i++) {
#ifdef DEBUG_VM
log("mapping %lx %lx", ram_base + i * map_range_1gb, ram_base + i * map_range_1gb);
#endif
map4kb_1gb(args, ram_base + i * map_range_1gb, ram_base + i * map_range_1gb);
}
extern void set_cr3(void* cr);
set_cr3(global_page_table);
}
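
As a sanity check on the index arithmetic in map4kb_2mb: for the canonical higher-half base used by kernel_base, the walk lands in PML4 slot 256 and slot 0 at the two lower levels. The asserts below are an illustrative addition, not part of the commit.
// Worked example (illustrative): indices derived for 0xffff800000000000.
_Static_assert((0xffff800000000000ull >> 39) % 512 == 256, "level-3 (PML4) index");
_Static_assert((0xffff800000000000ull >> 30) % 512 == 0, "level-2 (PDPT) index");
_Static_assert((0xffff800000000000ull >> 21) % 512 == 0, "level-1 (PD) index");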

@@ -0,0 +1,24 @@
#ifndef _BOOT_VM_H
#define _BOOT_VM_H
#include <libboot/abi/memory.h>
#include <libboot/types.h>
#define VMM_LV0_ENTITY_COUNT (512)
#define VMM_LV1_ENTITY_COUNT (512)
#define VMM_LV2_ENTITY_COUNT (512)
#define VMM_LV3_ENTITY_COUNT (512)
#define PTABLE_LV_TOP (3)
#define PTABLE_LV0_VADDR_OFFSET (12)
#define PTABLE_LV1_VADDR_OFFSET (21)
#define PTABLE_LV2_VADDR_OFFSET (30)
#define PTABLE_LV3_VADDR_OFFSET (39)
#define VM_VADDR_OFFSET_AT_LEVEL(vaddr, off, ent) (((vaddr) >> (off)) % (ent))
static inline size_t page_size(void) { return 0x1000; }
void vm_setup(uintptr_t base, boot_args_t* args);
#endif // _BOOT_VM_H