Squash commits for public release

2025-02-12 09:54:05 -05:00
commit 7118adc514
1108 changed files with 80873 additions and 0 deletions


@@ -0,0 +1,20 @@
#ifndef _KERNEL_ALGO_BITMAP_H
#define _KERNEL_ALGO_BITMAP_H
#include <libkern/types.h>
struct bitmap {
uint8_t* data;
size_t len;
};
typedef struct bitmap bitmap_t;
bitmap_t bitmap_wrap(uint8_t* data, size_t len);
bitmap_t bitmap_allocate(size_t len);
int bitmap_find_space(bitmap_t bitmap, int req);
int bitmap_find_space_aligned(bitmap_t bitmap, int req, int alignment);
int bitmap_set(bitmap_t bitmap, int where);
int bitmap_unset(bitmap_t bitmap, int where);
int bitmap_set_range(bitmap_t bitmap, int start, int len);
int bitmap_unset_range(bitmap_t bitmap, int start, int len);
#endif //_KERNEL_ALGO_BITMAP_H
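
A minimal usage sketch for the bitmap API above. The include path, the helper names (frames_init, frames_alloc4) and the return-value semantics (bitmap_find_space returning a start index, negative on failure) are assumptions inferred from the declarations, not taken from the source.

#include <algo/bitmap.h> /* assumed include path based on the guard name */

static bitmap_t frames;

int frames_init(void)
{
    frames = bitmap_allocate(1024); /* one bit per frame */
    return bitmap_set_range(frames, 0, 16); /* assume the first 16 frames are reserved */
}

int frames_alloc4(void)
{
    int start = bitmap_find_space(frames, 4); /* assumed: index of a free run, < 0 on failure */
    if (start < 0) {
        return -1;
    }
    bitmap_set_range(frames, start, 4);
    return start;
}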


@@ -0,0 +1,34 @@
#ifndef _KERNEL_ALGO_DYNAMIC_ARRAY_H
#define _KERNEL_ALGO_DYNAMIC_ARRAY_H
#include <libkern/types.h>
// TODO: Speed up bucket search using binary jumps.
struct dynamic_array_bucket {
void* data;
struct dynamic_array_bucket* next;
size_t capacity;
size_t size;
};
typedef struct dynamic_array_bucket dynamic_array_bucket_t;
struct dynamic_array {
dynamic_array_bucket_t* head;
dynamic_array_bucket_t* tail;
size_t size; /* number of elements in vector */
size_t element_size; /* size of elements in bytes */
};
typedef struct dynamic_array dynamic_array_t;
#define dynarr_init(type, v) dynarr_init_of_size_impl(v, sizeof(type), 8)
#define dynarr_init_of_size(type, v, cap) dynarr_init_of_size_impl(v, sizeof(type), cap)
int dynarr_init_of_size_impl(dynamic_array_t* v, size_t element_size, size_t capacity);
int dynarr_free(dynamic_array_t* v);
void* dynarr_get(dynamic_array_t* v, int index);
void* dynarr_push(dynamic_array_t* v, void* element);
int dynarr_pop(dynamic_array_t* v);
int dynarr_clear(dynamic_array_t* v);
#endif // _KERNEL_ALGO_DYNAMIC_ARRAY_H
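
A short sketch of how the dynamic array might be used. The push-by-pointer (copy into the array) and get-by-index semantics are inferred from the prototypes; the struct, function name, and include path are hypothetical.

#include <algo/dynamic_array.h> /* assumed include path based on the guard name */

struct region {
    size_t start;
    size_t len;
};

static dynamic_array_t regions;

void regions_demo(void)
{
    dynarr_init(struct region, &regions); /* default bucket capacity of 8 elements */

    struct region r = { .start = 0x1000, .len = 0x2000 };
    dynarr_push(&regions, &r); /* assumed: the element is copied into the array */

    struct region* back = (struct region*)dynarr_get(&regions, (int)regions.size - 1);
    (void)back;

    dynarr_pop(&regions);
    dynarr_free(&regions);
}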


@@ -0,0 +1,11 @@
#ifndef _KERNEL_ALGO_HASH_H
#define _KERNEL_ALGO_HASH_H
#include <libkern/types.h>
#define hashint(hfunc, val) (hfunc((uint8_t*)&val, sizeof(val)))
uint32_t hash_crc32(uint8_t* data, size_t len);
uint32_t hashstr_crc32(char* data);
#endif // _KERNEL_ALGO_HASH_H
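
For illustration, hashint() wraps a hash function so it can be applied to any lvalue by taking its address and size. A hedged sketch (function names and include path are hypothetical; only the signatures come from the header):

#include <algo/hash.h> /* assumed include path based on the guard name */

uint32_t hash_pid(uint32_t pid)
{
    /* expands to hash_crc32((uint8_t*)&pid, sizeof(pid)) */
    return hashint(hash_crc32, pid);
}

uint32_t hash_name(char* name)
{
    return hashstr_crc32(name); /* hashes the NUL-terminated string */
}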


@@ -0,0 +1,39 @@
#ifndef _KERNEL_ALGO_RINGBUFFER_H
#define _KERNEL_ALGO_RINGBUFFER_H
#include <libkern/libkern.h>
#include <libkern/lock.h>
#include <mem/kmemzone.h>
#define RINGBUFFER_STD_SIZE (16 * KB)
struct __ringbuffer {
kmemzone_t zone;
size_t start;
size_t end;
};
typedef struct __ringbuffer ringbuffer_t;
ringbuffer_t ringbuffer_create(size_t size);
static ALWAYS_INLINE ringbuffer_t ringbuffer_create_std() { return ringbuffer_create(RINGBUFFER_STD_SIZE); }
void ringbuffer_free(ringbuffer_t* rbuf);
ssize_t ringbuffer_space_to_read_from(ringbuffer_t* rbuf, size_t start);
ssize_t ringbuffer_space_to_read(ringbuffer_t* rbuf);
ssize_t ringbuffer_space_to_write(ringbuffer_t* rbuf);
size_t ringbuffer_read_from(ringbuffer_t* rbuf, size_t start, uint8_t* buf, size_t siz);
size_t ringbuffer_read_user_from(ringbuffer_t* rbuf, size_t ustart, uint8_t __user* buf, size_t siz);
size_t ringbuffer_read(ringbuffer_t* rbuf, uint8_t* buf, size_t siz);
size_t ringbuffer_read_user(ringbuffer_t* rbuf, uint8_t __user* buf, size_t siz);
size_t ringbuffer_write(ringbuffer_t* rbuf, const uint8_t* buf, size_t siz);
size_t ringbuffer_write_user(ringbuffer_t* rbuf, const uint8_t __user* buf, size_t siz);
size_t ringbuffer_write_ignore_bounds(ringbuffer_t* rbuf, const uint8_t* buf, size_t siz);
size_t ringbuffer_write_user_ignore_bounds(ringbuffer_t* rbuf, const uint8_t __user* buf, size_t siz);
size_t ringbuffer_read_one(ringbuffer_t* rbuf, uint8_t* data);
size_t ringbuffer_write_one(ringbuffer_t* rbuf, uint8_t data);
void ringbuffer_clear(ringbuffer_t* rbuf);
#endif //_KERNEL_ALGO_RINGBUFFER_H
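
A sketch of the basic producer/consumer flow with kernel-space pointers (the _user variants would be used for userspace buffers instead). The tty_* helper names are hypothetical; the include path matches the one used by sync_ringbuffer.h below.

#include <algo/ringbuffer.h>

static ringbuffer_t tty_buf;

void tty_buf_init(void)
{
    tty_buf = ringbuffer_create_std(); /* 16 KB backing zone */
}

void tty_push_char(uint8_t c)
{
    if (ringbuffer_space_to_write(&tty_buf) > 0) {
        ringbuffer_write_one(&tty_buf, c);
    }
}

size_t tty_drain(uint8_t* out, size_t cap)
{
    ssize_t avail = ringbuffer_space_to_read(&tty_buf);
    if (avail <= 0) {
        return 0;
    }
    size_t n = (size_t)avail < cap ? (size_t)avail : cap;
    return ringbuffer_read(&tty_buf, out, n);
}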


@@ -0,0 +1,131 @@
#ifndef _KERNEL_ALGO_SYNC_RINGBUFFER_H
#define _KERNEL_ALGO_SYNC_RINGBUFFER_H
#include <algo/ringbuffer.h>
#include <libkern/libkern.h>
#include <libkern/lock.h>
#include <mem/kmemzone.h>
struct __sync_ringbuffer {
ringbuffer_t ringbuffer;
spinlock_t lock;
};
typedef struct __sync_ringbuffer sync_ringbuffer_t;
static ALWAYS_INLINE sync_ringbuffer_t sync_ringbuffer_create(size_t size)
{
sync_ringbuffer_t res;
res.ringbuffer = ringbuffer_create(size);
spinlock_init(&res.lock);
return res;
}
#define sync_ringbuffer_create_std() sync_ringbuffer_create(RINGBUFFER_STD_SIZE)
static ALWAYS_INLINE void sync_ringbuffer_free(sync_ringbuffer_t* buf)
{
spinlock_acquire(&buf->lock);
ringbuffer_free(&buf->ringbuffer);
spinlock_release(&buf->lock);
}
static ALWAYS_INLINE ssize_t sync_ringbuffer_space_to_read(sync_ringbuffer_t* buf)
{
spinlock_acquire(&buf->lock);
ssize_t res = ringbuffer_space_to_read(&buf->ringbuffer);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE ssize_t sync_ringbuffer_space_to_read_from(sync_ringbuffer_t* buf, size_t start)
{
spinlock_acquire(&buf->lock);
ssize_t res = ringbuffer_space_to_read_from(&buf->ringbuffer, start);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE ssize_t sync_ringbuffer_space_to_write(sync_ringbuffer_t* buf)
{
spinlock_acquire(&buf->lock);
ssize_t res = ringbuffer_space_to_write(&buf->ringbuffer);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE size_t sync_ringbuffer_read_from(sync_ringbuffer_t* buf, size_t start, uint8_t* holder, size_t siz)
{
spinlock_acquire(&buf->lock);
size_t res = ringbuffer_read_from(&buf->ringbuffer, start, holder, siz);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE size_t sync_ringbuffer_read_user_from(sync_ringbuffer_t* buf, size_t start, uint8_t __user* holder, size_t siz)
{
spinlock_acquire(&buf->lock);
size_t res = ringbuffer_read_user_from(&buf->ringbuffer, start, holder, siz);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE size_t sync_ringbuffer_read(sync_ringbuffer_t* buf, uint8_t* v, size_t a)
{
spinlock_acquire(&buf->lock);
size_t res = ringbuffer_read(&buf->ringbuffer, v, a);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE size_t sync_ringbuffer_read_user(sync_ringbuffer_t* buf, uint8_t __user* v, size_t a)
{
spinlock_acquire(&buf->lock);
size_t res = ringbuffer_read_user(&buf->ringbuffer, v, a);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE size_t sync_ringbuffer_write(sync_ringbuffer_t* buf, const uint8_t* v, size_t a)
{
spinlock_acquire(&buf->lock);
size_t res = ringbuffer_write(&buf->ringbuffer, v, a);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE size_t sync_ringbuffer_write_user(sync_ringbuffer_t* buf, const uint8_t __user* v, size_t a)
{
spinlock_acquire(&buf->lock);
size_t res = ringbuffer_write_user(&buf->ringbuffer, v, a);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE size_t sync_ringbuffer_write_ignore_bounds(sync_ringbuffer_t* buf, const uint8_t* holder, size_t siz)
{
spinlock_acquire(&buf->lock);
size_t res = ringbuffer_write_ignore_bounds(&buf->ringbuffer, holder, siz);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE size_t sync_ringbuffer_write_user_ignore_bounds(sync_ringbuffer_t* buf, const uint8_t __user* holder, size_t siz)
{
spinlock_acquire(&buf->lock);
size_t res = ringbuffer_write_user_ignore_bounds(&buf->ringbuffer, holder, siz);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE size_t sync_ringbuffer_read_one(sync_ringbuffer_t* buf, uint8_t* data)
{
spinlock_acquire(&buf->lock);
size_t res = ringbuffer_read_one(&buf->ringbuffer, data);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE size_t sync_ringbuffer_write_one(sync_ringbuffer_t* buf, uint8_t data)
{
spinlock_acquire(&buf->lock);
size_t res = ringbuffer_write_one(&buf->ringbuffer, data);
spinlock_release(&buf->lock);
return res;
}
static ALWAYS_INLINE void sync_ringbuffer_clear(sync_ringbuffer_t* buf)
{
spinlock_acquire(&buf->lock);
ringbuffer_clear(&buf->ringbuffer);
spinlock_release(&buf->lock);
}
#endif //_KERNEL_ALGO_SYNC_RINGBUFFER_H
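
The sync wrappers take the spinlock around each underlying call, so callers get per-operation atomicity but nothing across calls (a space_to_read check followed by a read is not atomic as a pair). A hedged sketch of that pattern; the kbd_* names and include path are hypothetical.

#include <algo/sync_ringbuffer.h> /* assumed include path based on the guard name */

static sync_ringbuffer_t kbd_buf;

void kbd_init(void)
{
    kbd_buf = sync_ringbuffer_create_std();
}

/* Producer side (e.g. an interrupt handler): one locked write per scancode. */
void kbd_on_scancode(uint8_t code)
{
    sync_ringbuffer_write_one(&kbd_buf, code);
}

/* Consumer side: each read is individually locked, so treat a short read
   as "nothing available" rather than pre-checking the available space. */
size_t kbd_read(uint8_t* out, size_t cap)
{
    return sync_ringbuffer_read(&kbd_buf, out, cap);
}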