Squash commits for public release

This commit is contained in:
2025-02-12 09:54:05 -05:00
commit 7118adc514
1108 changed files with 80873 additions and 0 deletions

172
libs/libc/malloc/malloc.c Normal file
View File

@@ -0,0 +1,172 @@
#include "malloc.h"
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
// Allocation payloads are rounded up to this power-of-two boundary.
#define ALIGNMENT (4)
// A free chunk is split only when the leftover beyond the request is at
// least this many bytes. (Note: "DEVIDE" is a typo for "DIVIDE", kept as-is.)
#define DEVIDE_SPACE_BOUNDARY (32)
// Heads of the mmap'ed blocks; each block is a doubly linked chunk list.
static malloc_header_t* memory[MALLOC_MAX_ALLOCATED_BLOCKS];
// Number of entries of `memory` currently in use.
static size_t allocated_blocks = 0;
// Maps a new block large enough for `sz` payload bytes; 0 on success, -1 on failure.
static int _alloc_new_block(size_t sz);
static int _alloc_new_block(size_t sz)
{
// TODO: This should be expendable.
assert(allocated_blocks < MALLOC_MAX_ALLOCATED_BLOCKS);
sz += sizeof(malloc_header_t);
// This reduces system calls for the small memory allocations.
size_t allocated_sz = sz > MALLOC_DEFAULT_BLOCK_SIZE ? sz : MALLOC_DEFAULT_BLOCK_SIZE;
intptr_t ret = (intptr_t)mmap(NULL, allocated_sz, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
if (ret < 0) {
return -1;
}
memory[allocated_blocks] = (void*)ret;
memory[allocated_blocks]->flags = 0;
memory[allocated_blocks]->next = 0;
memory[allocated_blocks]->prev = 0;
memory[allocated_blocks]->size = allocated_sz - sizeof(malloc_header_t);
allocated_blocks++;
return 0;
}
// A chunk is worth splitting only when, after carving out `alloc_size`,
// at least DEVIDE_SPACE_BOUNDARY bytes would remain for the new chunk.
static inline char _malloc_need_to_divide_space(malloc_header_t* space, size_t alloc_size)
{
    size_t needed = alloc_size + DEVIDE_SPACE_BOUNDARY;
    return needed <= space->size;
}
// Checks whether `space` can hold `alloc_size` bytes. When the chunk will
// be split, a header for the carved-off remainder must fit as well.
static inline char _malloc_can_fit_allocation(malloc_header_t* space, size_t alloc_size)
{
    size_t required = alloc_size;
    if (_malloc_need_to_divide_space(space, alloc_size)) {
        required += sizeof(malloc_header_t);
    }
    return space->size >= required;
}
/**
 * Allocates `sz` bytes. Small requests (<= 64 bytes) are served by the
 * slab allocator; everything else does a first-fit search over the
 * mmap'ed blocks, splitting the found chunk when profitable.
 *
 * Returns NULL for sz == 0 and on out-of-memory.
 */
void* malloc(size_t sz)
{
    if (!sz) {
        return NULL;
    }
    // Guard against wrap-around while rounding up to the alignment.
    if (sz > SIZE_MAX - (ALIGNMENT - 1)) {
        return NULL;
    }
    sz += (ALIGNMENT - 1);
    // BUGFIX: the mask must have size_t width. `~(uint32_t)(ALIGNMENT - 1)`
    // zero-extends to 0x00000000FFFFFFFC on LP64 targets and would clear
    // the upper half of large sizes.
    sz &= ~(size_t)(ALIGNMENT - 1);
    void* res = slab_alloc(sz);
    if (res) {
        return res;
    }
    // Iterating over allocated by mmap blocks to find a first fit memory chunk.
    malloc_header_t* first_fit = NULL;
    for (size_t i = 0; i < allocated_blocks; i++) {
        malloc_header_t* cur_block = memory[i];
        while (cur_block->next && !(block_is_free(cur_block) && _malloc_can_fit_allocation(cur_block, sz))) {
            cur_block = cur_block->next;
        }
        if (block_is_free(cur_block) && _malloc_can_fit_allocation(cur_block, sz)) {
            first_fit = cur_block;
            break;
        }
    }
    if (!first_fit) {
        // No chunk fits: map a fresh block. Its single free chunk is
        // guaranteed to be large enough for `sz`.
        int err = _alloc_new_block(sz);
        if (err) {
            return NULL;
        }
        first_fit = memory[allocated_blocks - 1];
    }
    malloc_header_t* copy_next = first_fit->next;
    size_t copy_size = first_fit->size;
    first_fit->flags |= FLAG_ALLOCATED;
    if (_malloc_need_to_divide_space(first_fit, sz)) {
        // Shrink the chunk to `sz` and carve a new free chunk out of the
        // remainder, keeping the doubly linked list consistent.
        first_fit->size = sz;
        first_fit->next = (malloc_header_t*)((uintptr_t)first_fit + sz + sizeof(malloc_header_t));
        first_fit->next->flags = 0;
        first_fit->next->size = copy_size - sz - sizeof(malloc_header_t);
        first_fit->next->next = copy_next;
        first_fit->next->prev = first_fit;
        if (first_fit->next->next) {
            first_fit->next->next->prev = first_fit->next;
        }
    }
    // Usable memory starts right after the header.
    return (void*)&((malloc_header_t*)first_fit)[1];
}
void free(void* mem)
{
if (!mem) {
return;
}
malloc_header_t* mem_header = &((malloc_header_t*)mem)[-1];
if (block_is_slab(mem_header)) {
return slab_free(mem_header);
}
block_rem_flags(mem_header, FLAG_ALLOCATED);
while (mem_header->prev && block_is_free(mem_header->prev)) {
mem_header = mem_header->prev;
}
// Trying to glue the freed chunk with its neighbours.
while (mem_header->next && block_is_free(mem_header->next)) {
mem_header->size += mem_header->next->size + sizeof(malloc_header_t);
if (mem_header->next->next) {
mem_header->next->next->prev = mem_header;
}
mem_header->next = mem_header->next->next;
}
}
/**
 * Allocates a zero-initialized array of `num` elements of `size` bytes.
 *
 * Returns NULL on out-of-memory or when `num * size` would overflow
 * size_t (the overflow previously went unchecked and could silently
 * allocate a too-small buffer).
 */
void* calloc(size_t num, size_t size)
{
    if (size && num > SIZE_MAX / size) {
        return NULL;
    }
    size_t total = num * size;
    void* mem = malloc(total);
    if (!mem) {
        return NULL;
    }
    memset(mem, 0, total);
    return mem;
}
/**
 * Resizes the allocation at `ptr` to `new_size` bytes.
 *
 * realloc(NULL, n) behaves like malloc(n); realloc(p, 0) frees `p` and
 * returns NULL (previously malloc(0) returned NULL and `ptr` leaked).
 * On allocation failure the original block is left untouched and NULL
 * is returned, so the caller can still use `ptr`.
 */
void* realloc(void* ptr, size_t new_size)
{
    if (!ptr) {
        return malloc(new_size);
    }
    if (!new_size) {
        free(ptr);
        return NULL;
    }
    size_t old_size = ((malloc_header_t*)ptr)[-1].size;
    if (old_size == new_size) {
        return ptr;
    }
    uint8_t* new_area = malloc(new_size);
    if (!new_area) {
        return NULL;
    }
    // Copy only the bytes that exist in both the old and the new block.
    memcpy(new_area, ptr, new_size < old_size ? new_size : old_size);
    free(ptr);
    return new_area;
}
// One-time allocator initialisation; currently only seeds the slab lists.
void _malloc_init()
{
    _slab_init();
}

68
libs/libc/malloc/malloc.h Normal file
View File

@@ -0,0 +1,68 @@
#ifndef _LIBC_MALLOC_MALLOC_H
#define _LIBC_MALLOC_MALLOC_H
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/cdefs.h>
#include <sys/types.h>
__BEGIN_DECLS
// Minimum number of bytes mmap'ed per general-purpose block.
#define MALLOC_DEFAULT_BLOCK_SIZE 4096
// Fixed capacity of the block table in malloc.c.
#define MALLOC_MAX_ALLOCATED_BLOCKS 256
// Chunk flag: the chunk is currently handed out to a caller.
#define FLAG_ALLOCATED (0x1)
// Chunk flag: the chunk belongs to the slab allocator.
#define FLAG_SLAB (0x2)
// Per-chunk bookkeeping header placed immediately before the payload.
struct __malloc_header {
    size_t size; // payload bytes available after this header
    uint32_t flags; // FLAG_* bits
    struct __malloc_header* next; // next chunk in the same list
    struct __malloc_header* prev; // previous chunk in the same list
};
typedef struct __malloc_header malloc_header_t;
// True only when every bit set in `flags` is also set on the block.
static inline bool block_has_flags(malloc_header_t* block, uint32_t flags)
{
    uint32_t present = block->flags & flags;
    return present == flags;
}
// Sets the given flag bits on the block.
static inline void block_set_flags(malloc_header_t* block, uint32_t flags)
{
    block->flags |= flags;
}
// Clears the given flag bits on the block.
static inline void block_rem_flags(malloc_header_t* block, uint32_t flags)
{
    block->flags &= (~flags);
}
// True when the chunk is currently handed out to a caller.
static inline bool block_is_allocated(malloc_header_t* block)
{
    return block_has_flags(block, FLAG_ALLOCATED);
}
// True when the chunk is not handed out to a caller.
static inline bool block_is_free(malloc_header_t* block)
{
    return !block_is_allocated(block);
}
// True when the chunk belongs to the slab allocator.
static inline bool block_is_slab(malloc_header_t* block)
{
    return block_has_flags(block, FLAG_SLAB);
}
// One-time allocator initialisation (currently just the slabs).
void _malloc_init();
// Pre-populates the slab free lists; called from _malloc_init().
void _slab_init();
// Standard allocation entry points implemented in malloc.c.
void* malloc(size_t);
void free(void*);
void* calloc(size_t, size_t);
void* realloc(void*, size_t);
// Fast-path allocator for payloads of at most 64 bytes; returns NULL when
// the request cannot be served from a slab.
void* slab_alloc(size_t);
// Returns a slab cell (identified by its header) to its free list.
void slab_free(malloc_header_t* mem_header);
__END_DECLS
#endif // _LIBC_MALLOC_MALLOC_H

79
libs/libc/malloc/slab.c Normal file
View File

@@ -0,0 +1,79 @@
#include "malloc.h"
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
// Fast allocator for sizes 16, 32, 48, 64 bytes
// Free-list heads indexed by size class (size / 16); index 0 is unused.
malloc_header_t* free_blocks[5];
/**
 * Carves one MALLOC_DEFAULT_BLOCK_SIZE mapping into equal slab cells of
 * `size` payload bytes (plus a header each) and chains them into the
 * free list for that size class (free_blocks index = size / 16).
 *
 * On mmap failure the class head is set to NULL so slab_alloc falls back
 * to the general allocator.
 */
void prepare_free_blocks(size_t size)
{
    int block_id = size >> 4;
    const size_t alloc_size = MALLOC_DEFAULT_BLOCK_SIZE;
    // POSIX requires fd == -1 for MAP_ANONYMOUS; compare against MAP_FAILED.
    void* area = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (area == MAP_FAILED) {
        free_blocks[block_id] = NULL;
        return;
    }
    // Use a byte cursor: arithmetic on void* is a GNU extension and is
    // undefined in ISO C.
    uint8_t* raw_area = (uint8_t*)area;
    const size_t sizeof_block_with_header = size + sizeof(malloc_header_t);
    size_t passed = 0;
    free_blocks[block_id] = (malloc_header_t*)raw_area;
    malloc_header_t* prev_header = NULL;
    while (passed + sizeof_block_with_header <= alloc_size) {
        malloc_header_t* current = (malloc_header_t*)raw_area;
        // FLAG_ALLOCATED stays set on slab cells; membership in the free
        // list, not this flag, is what marks a cell as available.
        current->flags = FLAG_SLAB | FLAG_ALLOCATED;
        current->next = NULL;
        current->prev = prev_header;
        current->size = size;
        prev_header = current;
        raw_area += sizeof_block_with_header;
        passed += sizeof_block_with_header;
    }
    // Second pass, last cell first: fill in the forward links.
    // BUGFIX: next_header was never advanced, leaving every cell's `next`
    // NULL, so each size class effectively contained a single usable cell.
    malloc_header_t* next_header = NULL;
    while (prev_header) {
        prev_header->next = next_header;
        next_header = prev_header;
        prev_header = prev_header->prev;
    }
}
// Seeds the free list of every supported size class (16, 32, 48, 64).
void _slab_init()
{
    for (size_t class_size = 16; class_size <= 64; class_size += 16) {
        prepare_free_blocks(class_size);
    }
}
/**
 * Serves a small allocation from the slab free lists.
 *
 * Returns NULL when `size` exceeds the largest class (64 bytes) or the
 * class is exhausted; the caller then falls back to the block allocator.
 */
void* slab_alloc(size_t size)
{
    if (size > 64) {
        return NULL;
    }
    // Round up to the size class: 1..16 -> 1, 17..32 -> 2, ...
    int block_id = ((size + 15) >> 4);
    malloc_header_t* zone = free_blocks[block_id];
    if (!zone) {
        return NULL;
    }
    // Pop the head of the free list.
    free_blocks[block_id] = zone->next;
    if (zone->next) {
        // BUGFIX: the new head previously kept a stale `prev` pointing at
        // the just-allocated cell, violating the list invariant.
        zone->next->prev = NULL;
    }
    return (void*)&((malloc_header_t*)zone)[1];
}
// Pushes a slab cell back onto the head of its size-class free list.
// The class is recovered from the size recorded in the cell's header.
void slab_free(malloc_header_t* mem_header)
{
    int block_id = mem_header->size >> 4;
    malloc_header_t* old_head = free_blocks[block_id];
    mem_header->next = old_head;
    mem_header->prev = NULL;
    free_blocks[block_id] = mem_header;
    if (old_head) {
        old_head->prev = mem_header;
    }
}