Squash commits for public release
This commit is contained in:
143
utils/codeassistant/clang_tidy.py
Normal file
143
utils/codeassistant/clang_tidy.py
Normal file
@@ -0,0 +1,143 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import subprocess
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Lint-target configuration: `target` selects which source trees are linted,
# `target_arch` selects the compiler flag set handed to clang-tidy after `--`.
target = "all"  # all, kernel, userland
target_arch = "x86_64"


class ClassTidyLauncher():
    """Collects C/C++ sources under a directory and runs clang-tidy on them.

    Files belonging to other architectures (and to the wrong bitness
    directory) are filtered out by path components before invocation.
    """

    # Per-architecture compiler flags, passed to clang-tidy after `--`.
    backend_flags = {
        "x86": ["-c", "-m32",
                "-D_LIBCXX_BUILD_XOS_EXTENSIONS"],
        "arm32": [
            "-fno-builtin",
            "-march=armv7-a",
            "-mfpu=neon-vfpv4",
            "-mfloat-abi=soft",
            "-D_LIBCXX_BUILD_XOS_EXTENSIONS",
        ],
        "arm64": [
            "-fno-builtin",
            "-mcpu=cortex-a53+nofp+nosimd+nocrypto+nocrc",
            "-D_LIBCXX_BUILD_XOS_EXTENSIONS",
        ],
        "x86_64": [
            "-c",
            "-D_LIBCXX_BUILD_XOS_EXTENSIONS",
        ]
    }

    def __init__(self, dir, includes):
        self.path_dir = dir
        self.include = includes
        self.front_flags = ["--use-color", "--fix"]
        # BUGFIX: copy the class-level list. Previously the shared
        # `backend_flags[target_arch]` list object itself was stored, so
        # "-I" paths and "-std=..." flags appended by one launcher instance
        # leaked into every other instance (and into the class attribute).
        self.back_flags = list(self.backend_flags[target_arch])

    def run_clang_tidy(self, ff, files, bf):
        """Invoke clang-tidy: front flags `ff`, `files`, then `--` + compiler flags `bf`.

        Returns the raw stdout bytes of the clang-tidy process.
        """
        cmd = ["clang-tidy"]
        cmd.extend(ff)
        cmd.extend(files)
        cmd.extend(["--"])
        cmd.extend(bf)
        result = subprocess.run(cmd, stdout=subprocess.PIPE)
        return result.stdout

    def process_includes(self):
        """Append an `-I <dir>` pair for every configured include directory."""
        for i in self.include:
            self.back_flags.append("-I")
            self.back_flags.append(i)

    def get_files(self):
        """Walk `path_dir` collecting .c/.cpp files relevant to `target_arch`.

        Populates `self.c_files` and `self.cpp_files`, skipping any path
        that mentions a foreign platform name or the wrong bitsNN directory.
        """
        self.c_files = []
        self.cpp_files = []

        platforms = ['x86', 'i386', 'x86_64', 'arm',
                     'arm32', 'arm64', 'aarch32', 'aarch64']
        bits = ['bits32', 'bits64']

        platform_to_bits = {
            "x86": "bits32",
            "x86_64": "bits64",
            "arm32": "bits32",
            "arm64": "bits64",
        }

        # Path components that are acceptable for each target arch; anything
        # in `platforms` but not listed here causes the file to be skipped.
        allowed_paths = {
            "x86": ["x86", "i386"],
            "x86_64": ["x86", "x86_64"],
            "arm32": ["aarch32", "arm32", "arm"],
            "arm64": ["aarch64", "arm64", "arm"],
        }

        ignore_platforms = []
        ignore_bits = []

        allowed_paths_for_target = allowed_paths.get(target_arch, None)
        if allowed_paths_for_target is None:
            print("Unknown platform {0}".format(target_arch))
            exit(1)

        for platform in platforms:
            if not (platform in allowed_paths_for_target):
                ignore_platforms.append(platform)

        for bit in bits:
            if platform_to_bits[target_arch] != bit:
                ignore_bits.append(bit)

        def is_file_type(name, ending):
            # True when `name` ends with '.<ending>'.
            if len(name) <= len(ending):
                return False
            return (name[-len(ending)-1::] == '.'+ending)

        def is_file_blocked(name):
            # Skip paths mentioning a foreign platform or bitness anywhere.
            for platform in ignore_platforms:
                if (name.find(platform) != -1):
                    return True
            for bit in ignore_bits:
                if (name.find(bit) != -1):
                    return True
            return False

        for path, subdirs, files in os.walk(self.path_dir):
            for name in files:
                # It runs from out dir, at least it should
                file = path + "/" + name
                if is_file_blocked(file):
                    continue
                if is_file_type(file, 'c'):
                    self.c_files.append(file)
                if is_file_type(file, 'cpp'):
                    self.cpp_files.append(file)

    def process(self):
        """Run clang-tidy over collected C then C++ files; return combined output."""
        self.process_includes()
        self.get_files()
        # BUGFIX: build per-language flag lists as *copies*. The previous
        # `self.c_back_flags = self.back_flags; self.c_back_flags += [...]`
        # mutated the shared list in place, so the C++ invocation inherited
        # "-std=gnu98" in addition to "-std=c++2a".
        self.c_back_flags = self.back_flags + ["-std=gnu98"]
        ret = ""
        if len(self.c_files) > 0:
            ret += self.run_clang_tidy(self.front_flags,
                                       self.c_files, self.c_back_flags).decode("ascii")
        self.cpp_back_flags = self.back_flags + ["-std=c++2a"]
        if len(self.cpp_files) > 0:
            ret += self.run_clang_tidy(self.front_flags,
                                       self.cpp_files, self.cpp_back_flags).decode("ascii")
        return ret
|
||||
|
||||
|
||||
# Include roots needed by each source tree.
kernel_includes = ["kernel/include"]
app_includes = ["libs/libc/include", "libs/libcxx/include", "libs/libfoundation/include",
                "libs/libipc/include", "libs/libg/include", "libs/libui/include"]

# Lint the kernel and/or the userland trees, depending on `target`.
if target in ("all", "kernel"):
    print(ClassTidyLauncher("kernel/kernel", kernel_includes).process())

if target in ("all", "userland"):
    print(ClassTidyLauncher("servers/", app_includes).process())
    print(ClassTidyLauncher("libs/", app_includes).process())
|
||||
63
utils/codeassistant/include_guards_fix.py
Normal file
63
utils/codeassistant/include_guards_fix.py
Normal file
@@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env python
|
||||
# Launch the script from root of the project to have the correct paths
|
||||
|
||||
import os
|
||||
import sys
|
||||
from os import fdopen, remove
|
||||
|
||||
# Root directory to scan, passed as the first CLI argument.
walk_dir = sys.argv[1]

print('walk_dir = ' + walk_dir)
print('walk_dir (absolute) = ' + os.path.abspath(walk_dir))

# NOTE(review): appears unused in this script — confirm before removing.
all_includes = []
|
||||
|
||||
|
||||
def is_guard(line):
    """Return True when `line` looks like an include-guard check (``#ifndef _...``)."""
    return line.startswith("#ifndef _")
|
||||
|
||||
|
||||
def get_guard(line):
    """Extract the guard macro: drop the '#ifndef ' prefix and the trailing newline."""
    prefix_len = len("#ifndef ")
    return line[prefix_len:-1]
|
||||
|
||||
|
||||
def new_guard(line, path):
    """Build the canonical guard macro name from the header's path.

    Path components "libs" and "include" are dropped; the remaining
    components are joined with underscores, '.' and '-' are mapped to '_'
    and the result is upper-cased. The `line` argument (the old guard) is
    ignored; it is kept only for call-site compatibility.
    """
    parts = [p for p in path.split('/') if p not in ("libs", "include")]
    body = "".join("_" + part for part in parts) + "_"
    body = body.replace(".", "_").replace("-", "_")
    # Trailing separator is stripped to match the classic _DIR_FILE_H shape.
    return body.upper()[:-1]
|
||||
|
||||
|
||||
def fix_guards(file):
    # Rewrite the include guard of `file` in place: find the first
    # "#ifndef _..." line, derive the canonical guard name from the file
    # path, and replace every occurrence of the old guard in the file.
    print("prc ", file)
    data = []
    guard = None
    with open(file) as old_file:
        for line in old_file:
            data.append(line)
            # Only the first guard-looking line is taken as "the" guard.
            if is_guard(line) and guard is None:
                guard = get_guard(line)

    # No guard found: leave the header untouched.
    if guard is None:
        return

    ng = new_guard(guard, file)

    with open(file, 'w') as new_file:
        for i in data:
            # Replaces the old guard everywhere it occurs
            # (#ifndef / #define / trailing #endif comment).
            i = i.replace(guard, ng)
            new_file.write(i)
|
||||
|
||||
|
||||
# Process every header under walk_dir. NOTE(review): the third condition
# makes *any* file under a /libcxx/include directory match, not just
# .h/.hpp — presumably intentional because libcxx headers are
# extensionless; confirm.
for root, subdirs, files in os.walk(walk_dir):
    for x in files:
        if x.endswith(".h") or x.endswith(".hpp") or root.find("/libcxx/include") != -1:
            fix_guards(os.path.join(root, x))
|
||||
92
utils/codeassistant/libkern_libc_compat.py
Normal file
92
utils/codeassistant/libkern_libc_compat.py
Normal file
@@ -0,0 +1,92 @@
|
||||
#!/usr/bin/env python
|
||||
# The tool checks the compatability of linkern and libc bits/ structs
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def list_of_files_relative(opath):
    """Return paths of all '.h' files under `opath`, relative to `opath`.

    Each returned entry keeps a leading '/' (the `opath` prefix is simply
    sliced off the walked path).
    """
    def _has_suffix(name, ending):
        # True when `name` ends with '.<ending>'.
        return len(name) > len(ending) and name[-len(ending) - 1:] == '.' + ending

    collected = []
    for dirpath, _subdirs, filenames in os.walk(opath):
        for filename in filenames:
            # It runs from out dir, at least it should
            full = dirpath + "/" + filename
            if _has_suffix(full, 'h'):
                collected.append(full[len(opath):])
    return collected
|
||||
|
||||
|
||||
def process_file(file, res_map):
    """Tally every 'significant' line of `file` into the {line: count} `res_map`.

    Blank lines and preprocessor plumbing (includes, guard lines, the
    _KERNEL_LIBKERN / _LIBC markers) are ignored; all other lines are
    counted, newline stripped.
    """
    ignored_prefixes = (
        "#include",
        "#ifndef",
        "#endif",
        "#define _KERNEL_LIBKERN",
        "#define _LIBC",
    )

    def _significant(text):
        # Defensive newline strip (callers already strip before calling).
        if text.endswith("\n"):
            text = text[:-1]
        if not text:
            return False
        return not text.startswith(ignored_prefixes)

    with open(file) as handle:
        for raw in handle:
            stripped = raw[:-1] if raw.endswith("\n") else raw
            if _significant(stripped):
                res_map[stripped] = res_map.get(stripped, 0) + 1
|
||||
|
||||
|
||||
def create_map_of_lines(files):
    """Build one combined {line: count} map over all `files` (see process_file)."""
    counts = {}
    for path in files:
        process_file(path, counts)
    return counts
|
||||
|
||||
|
||||
def check_files(pbase, pslave, files):
    """Compare line-frequency maps of `files` rooted at `pbase` vs `pslave`.

    Returns True when every significant line of the base (LibKern) tree
    appears in the slave (LibC) tree with the same count; the base side is
    authoritative. A count mismatch fails silently, a missing line is
    reported before failing.
    """
    base_map = create_map_of_lines([pbase + rel for rel in files])
    slave_map = create_map_of_lines([pslave + rel for rel in files])

    for line, count in base_map.items():
        if line not in slave_map:
            print("Can't find {0} in LibC".format(line))
            return False
        if slave_map[line] != count:
            return False

    return True
|
||||
|
||||
|
||||
# Compare the LibKern bits/ headers (authoritative) against LibC's copies.
libkern_files = list_of_files_relative("kernel/include/libkern/bits")
libc_files = list_of_files_relative("libs/libc/include/bits")

# A file-count mismatch alone is only a warning; the per-line comparison
# below is what decides pass/fail.
if len(libkern_files) != len(libc_files):
    print("Note: LibC and LibKern might not be compatible, taking LibKern as base")

if check_files("kernel/include/libkern/bits", "libs/libc/include/bits", libkern_files):
    print("OK")
else:
    print("Failed")
    exit(1)
|
||||
40
utils/codeassistant/pongo_startup.py
Normal file
40
utils/codeassistant/pongo_startup.py
Normal file
@@ -0,0 +1,40 @@
|
||||
import os
|
||||
import subprocess
|
||||
from git import Repo
|
||||
|
||||
# Repo root is two levels above this script; pongoOS is cached beside it.
rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
pongodir = rootdir + "/.cached/pongoOS"

# First run: clone pongoOS into the cache directory.
if not os.path.exists(pongodir):
    url = "https://github.com/xOS-Project/pongoOS.git"
    print("Cloning pongoOS to ", pongodir)
    Repo.clone_from(url, pongodir)
|
||||
|
||||
|
||||
def run_command(cmd, cwd="."):
    """Run `cmd` through the shell in `cwd`; return (stdout_text, returncode)."""
    completed = subprocess.run(
        cmd,
        stdout=subprocess.PIPE,
        shell=True,
        cwd=cwd,
        env=os.environ.copy(),
    )
    return (completed.stdout.decode("ascii"), completed.returncode)
|
||||
|
||||
|
||||
print("Rebuilding pongoOS")
run_command("make -j16", pongodir)

print("Ready: pongoOS")
# The checkra1n binary must be supplied by the user via $CHECKRAIN.
checkrain_exec = os.environ.get('CHECKRAIN')
if checkrain_exec is None:
    print("Error: No provided $CHECKRAIN env variable.")
    print("Please get a checkrain binary at http://checkra.in/ and set $CHECKRAIN to the binary path.")
    print("")
    print("E.g on macOS after getting binary and installing it to /Application, run:")
    print("\texport CHECKRAIN=/Applications/checkra1n.app/Contents/MacOS/checkra1n")
    exit(1)

print("Checkrain is found. Connect your device and switch it to DFU mode.")
# Boot the freshly built pongoOS image on the DFU-mode device via checkra1n.
run_command("$CHECKRAIN -k " + pongodir +
            "/build/PongoConsolidated.bin -cpE", pongodir)

# Hand the xOS kernel image and disk image to pongoOS's loader script.
# NOTE(review): out/ is resolved relative to the *parent* of rootdir —
# confirm that matches the build layout.
xos_outdir = os.path.dirname(rootdir) + "/out/"
pathrun = "python3 scripts/load_xos.py -k {0}/rawImage.bin -r {0}/one.img".format(
    xos_outdir)
run_command(pathrun, pongodir)
|
||||
21
utils/codeassistant/recompile_connections.py
Normal file
21
utils/codeassistant/recompile_connections.py
Normal file
@@ -0,0 +1,21 @@
|
||||
#!/usr/bin/env python3
|
||||
# Launch the script from root of the project to have the correct paths
|
||||
|
||||
import subprocess
|
||||
import os
|
||||
import sys
|
||||
|
||||
# (input .ipc, generated header) pairs handled by the connection compiler.
connections = [
    ["libs/libapi/includes/libapi/window_server/Connections/ws_connection.ipc",
        "libs/libapi/includes/libapi/window_server/Connections/WSConnection.h"],
]

for conn in connections:
    inf = conn[0]
    outf = conn[1]
    # BUGFIX: .format() was never called on the template, so the raw
    # "Compiling {0} -> {1}" string was printed with the args appended.
    print("Compiling {0} -> {1}".format(inf, outf))
    cmd = ["utils/compilers/ConnectionCompiler/connc"]
    cmd.extend([inf, outf])
    result = subprocess.run(cmd, stdout=subprocess.PIPE)
    print(result.stdout.decode("ascii"))
    print()
|
||||
33
utils/codeassistant/syscall_parser.py
Normal file
33
utils/codeassistant/syscall_parser.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import requests
|
||||
import lxml.html as lh
|
||||
import pandas as pd
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser()
parser.add_argument('arch', type=str, help='Arch [arm, x86]')
args = parser.parse_args()

# Scrape the syscall tables from the ChromiumOS docs page.
url='https://chromium.googlesource.com/chromiumos/docs/+/master/constants/syscalls.md'
page = requests.get(url)
doc = lh.fromstring(page.content)
tr_elements = doc.xpath('//tr')

# Index of the first data row. NOTE(review): when several tables are
# present this keeps the row after the *last* "syscall name" header —
# confirm the wanted table is the final one on the page.
start = 0

for i, tr in enumerate(tr_elements):
    if tr[0].text_content() == "syscall name":
        start = i + 1

# Collect (name, number) pairs; "-" marks a syscall absent on that arch.
# Column 2 holds the arm numbers, column 4 the x86 numbers.
data = []
for tr in tr_elements[start:]:
    table_col = 2
    if args.arch == "x86":
        table_col = 4
    if tr[table_col].text_content() != "-":
        data.append([tr[0].text_content(), int(tr[table_col].text_content())])

# Sort by syscall number and print C-enum-style lines.
data.sort(key=lambda x: x[1])

for i in data:
    print("SYS_{0} = {1},".format(i[0].upper(), i[1]))
|
||||
232
utils/compilers/ConnectionCompiler/Generator/generator.py
Normal file
232
utils/compilers/ConnectionCompiler/Generator/generator.py
Normal file
@@ -0,0 +1,232 @@
|
||||
class Message:
    """Plain data holder describing one IPC message for the Generator."""

    def __init__(self, name, id, reply_id, decoder_magic, params, protected=False):
        # Attribute names are read directly by Generator — keep them stable.
        self.protected = protected      # key-protected messages carry m_key
        self.params = params            # list of (type, name) pairs
        self.decoder_magic = decoder_magic
        self.reply_id = reply_id
        self.id = id
        self.name = name
|
||||
|
||||
|
||||
class Generator:
    """Emits the C++ header for parsed .ipc decoders.

    Message/decoder descriptions come from the Parser; every `out` call
    writes one line (indented per `tabs`) to the open output file.
    """

    def __init__(self):
        # Output file handle; opened in generate().
        self.output = None

    def out(self, str, tabs=0):
        # Write one line, prefixed by `tabs` indent units.
        # NOTE(review): the indent literal appears collapsed to a single
        # space by the diff rendering — confirm the original width.
        for i in range(tabs):
            self.output.write(" ")
        self.output.write(str)
        self.output.write("\n")

    def params_readable(self, params):
        # Render [(type, name), ...] as a C++ parameter list "t0 n0,t1 n1".
        res = ""
        if len(params) > 0:
            for i in params:
                res += "{0} {1},".format(i[0], i[1])
            res = res[:-1]
        return res

    def message_create_std_funcs(self, msg):
        # id()/reply_id()/decoder_magic() overrides plus per-param accessors.
        self.out("int id() const override {{ return {0}; }}".format(msg.id), 1)
        self.out("int reply_id() const override {{ return {0}; }}".format(
            msg.reply_id), 1)
        if msg.protected:
            self.out("int key() const override { return m_key; }", 1)
        self.out("int decoder_magic() const override {{ return {0}; }}".format(
            msg.decoder_magic), 1)
        for i in msg.params:
            # Small scalars are returned by value; everything else by reference.
            if i[0] in ["int", "uint32_t", "bool", "int32_t"]:
                self.out(
                    "{0} {1}() const {{ return m_{1}; }}".format(i[0], i[1]), 1)
            else:
                self.out(
                    "{0}& {1}() {{ return m_{1}; }}".format(i[0], i[1]), 1)

    def message_create_vars(self, msg):
        # Member declarations; m_key first for key-protected messages.
        if msg.protected:
            self.out("message_key_t m_key;", 1)
        for i in msg.params:
            self.out("{0} m_{1};".format(i[0], i[1]), 1)

    def message_create_constructor(self, msg):
        # Constructor with a member-initializer list; protected messages
        # take the key as their first argument.
        params = msg.params
        if msg.protected:
            params = [('message_key_t', 'key')] + msg.params
        res = "{0}({1})".format(msg.name, self.params_readable(params))
        if len(params) > 0:
            self.out(res, 1)
            # ':' introduces the initializer list, ',' continues it.
            sign = ':'
            for i in params:
                self.out("{0} m_{1}({1})".format(sign, i[1]), 2)
                sign = ','

            self.out("{", 1)
            self.out("}", 1)
        else:
            self.out(res+" {}", 1)

    def message_create_encoder(self, msg):
        # encode(): magic, id, (key,) then each member in declaration order.
        self.out("EncodedMessage encode() const override".format(
            msg.decoder_magic), 1)
        self.out("{", 1)

        self.out("EncodedMessage buffer;", 2)
        self.out("Encoder::append(buffer, decoder_magic());", 2)
        self.out("Encoder::append(buffer, id());", 2)
        if msg.protected:
            self.out("Encoder::append(buffer, key());", 2)
        for i in msg.params:
            self.out("Encoder::append(buffer, m_{0});".format(i[1]), 2)

        self.out("return buffer;", 2)
        self.out("}", 1)

    def generate_message(self, msg):
        # Emit one complete `class <Name> : public Message` definition.
        self.out("class {0} : public Message {{".format(msg.name))
        self.out("public:")
        self.message_create_constructor(msg)
        self.message_create_std_funcs(msg)
        self.message_create_encoder(msg)
        self.out("private:")
        self.message_create_vars(msg)
        self.out("};")
        self.out("")

    def decoder_create_vars(self, messages, offset=0):
        # One scratch variable per distinct parameter name across messages.
        var_names = set()
        for (name, params) in messages.items():
            for i in params:
                if 'var_{0}'.format(i[1]) not in var_names:
                    self.out("{0} var_{1};".format(i[0], i[1]), offset)
                    var_names.add('var_{0}'.format(i[1]))

    def decoder_decode_message(self, msg, offset=0):
        # Decode each param into its var_* slot and construct the message.
        params_str = ""
        if msg.protected:
            params_str = "secret_key, "
        for i in msg.params:
            params_str += "var_{0}, ".format(i[1])

        # Trim the trailing ", ".
        if len(params_str) > 0:
            params_str = params_str[:-2]
        for i in msg.params:
            self.out(
                "Encoder::decode(buf, decoded_msg_len, var_{0});".format(i[1]), offset)
        self.out("return new {0}({1});".format(msg.name, params_str), offset)

    def decoder_create_std_funcs(self, decoder):
        # magic() accessor for the decoder's magic constant.
        self.out("int magic() const {{ return {0}; }}".format(
            decoder.magic), 1)

    def decoder_create_decode(self, decoder):
        # decode(): read magic + id, bail out (restoring decoded_msg_len)
        # on a foreign magic, then switch on the message id.
        self.out(
            "std::unique_ptr<Message> decode(const char* buf, size_t size, size_t& decoded_msg_len) override", 1)
        self.out("{", 1)
        self.out("int msg_id, decoder_magic;", 2)
        self.out("size_t saved_dml = decoded_msg_len;", 2)
        self.out("Encoder::decode(buf, decoded_msg_len, decoder_magic);", 2)
        self.out("Encoder::decode(buf, decoded_msg_len, msg_id);", 2)
        self.out("if (magic() != decoder_magic) {", 2)
        self.out("decoded_msg_len = saved_dml;", 3)
        self.out("return nullptr;", 3)
        self.out("}", 2)

        if decoder.protected:
            self.out("message_key_t secret_key;", 2)
            self.out("Encoder::decode(buf, decoded_msg_len, secret_key);", 2)
            self.out("", 0)

        self.decoder_create_vars(decoder.messages, 2)

        # Ids are assigned by insertion order starting at 1; must match the
        # numbering produced in generate().
        unique_msg_id = 1
        self.out("", 2)
        self.out("switch(msg_id) {", 2)
        for (name, params) in decoder.messages.items():
            self.out("case {0}:".format(unique_msg_id), 2)
            # Here it doesn't need to know the real reply_id, so we can put 0 here.
            self.decoder_decode_message(
                Message(name, unique_msg_id, 0, decoder.magic, params, decoder.protected), 3)
            unique_msg_id += 1

        self.out("default:", 2)
        self.out("decoded_msg_len = saved_dml;", 3)
        self.out("return nullptr;", 3)
        self.out("}", 2)
        self.out("}", 1)
        self.out("", 1)

    def decoder_create_handle(self, decoder):
        # handle(Message&): dispatch to the typed virtual handle() overloads.
        self.out("std::unique_ptr<Message> handle(Message& msg) override", 1)
        self.out("{", 1)
        self.out("if (magic() != msg.decoder_magic()) {", 2)
        self.out("return nullptr;", 3)
        self.out("}", 2)

        unique_msg_id = 1
        self.out("", 2)
        self.out("switch(msg.id()) {", 2)
        for (name, params) in decoder.messages.items():
            # Only request messages (those registered as functions) get a case.
            if name in decoder.functions:
                self.out("case {0}:".format(unique_msg_id), 2)
                self.out(
                    "return handle(static_cast<{0}&>(msg));".format(name), 3)

            unique_msg_id += 1

        self.out("default:", 2)
        self.out("return nullptr;", 3)
        self.out("}", 2)
        self.out("}", 1)
        self.out("", 1)

    def decoder_create_virtual_handle(self, decoder):
        # Default no-op virtual overloads, one per request message.
        for (accept, ret) in decoder.functions.items():
            self.out(
                "virtual std::unique_ptr<Message> handle({0}& msg) {{ return nullptr; }}".format(accept), 1)

    def generate_decoder(self, decoder):
        # Emit one complete `class <Name> : public MessageDecoder` definition.
        self.out("class {0} : public MessageDecoder {{".format(decoder.name))
        self.out("public:")
        self.out("{0}() {{}}".format(decoder.name), 1)
        self.decoder_create_std_funcs(decoder)
        self.decoder_create_decode(decoder)
        self.decoder_create_handle(decoder)
        self.decoder_create_virtual_handle(decoder)
        self.out("};")
        self.out("")

    def includes(self):
        # Fixed preamble: pragma + libipc/libg headers used by generated code.
        self.out("// Auto generated with utils/ConnectionCompiler")
        self.out("// See .ipc file")
        self.out("")
        self.out("#pragma once")
        self.out("#include <libipc/Encoder.h>")
        self.out("#include <libipc/ClientConnection.h>")
        self.out("#include <libipc/ServerConnection.h>")
        self.out("#include <libipc/StringEncoder.h>")
        self.out("#include <libipc/VectorEncoder.h>")
        self.out("#include <new>")
        self.out("#include <libg/Rect.h>")
        self.out("")

    def generate(self, filename, decoders):
        """Drive the whole emission: preamble, message classes, decoder classes."""
        self.output = open(filename, "w+")
        self.includes()
        for decoder in decoders:
            # First pass: assign 1-based ids by insertion order so replies
            # can reference each other's ids.
            msgd = {}
            unique_msg_id = 1
            for (name, params) in decoder.messages.items():
                msgd[name] = unique_msg_id
                unique_msg_id += 1

            # Second pass: emit each message, wiring reply_id (-1 = none).
            for (name, params) in decoder.messages.items():
                reply_name = decoder.functions.get(name, None)
                reply_id = -1
                if reply_name is not None:
                    reply_id = msgd[reply_name]
                self.generate_message(
                    Message(name, msgd[name], reply_id, decoder.magic, params, decoder.protected))

            self.generate_decoder(decoder)
        self.output.close()
|
||||
108
utils/compilers/ConnectionCompiler/Lexer/lexer.py
Normal file
108
utils/compilers/ConnectionCompiler/Lexer/lexer.py
Normal file
@@ -0,0 +1,108 @@
|
||||
from token import Token
|
||||
from Lexer.reserved_symbols import *
|
||||
from type_file import Type
|
||||
|
||||
|
||||
class Lexer():
    """Hand-written tokenizer over a list of source lines.

    Characters are consumed one at a time across line boundaries;
    `current_char` becomes None once the input is exhausted.
    """

    # Length of the longest multi-character operator ('=>') to look ahead for.
    MAX_OPERATION_SIZE = 2

    def __init__(self, code: [str]):
        self.code = code
        self.current_code_part = code[0]
        self.current_code_line = 0
        self.code_lines = len(code)
        self.current_position = 0
        self.current_char = self.current_code_part[self.current_position]

    def advance(self):
        # Move the cursor one character, hopping to the next line when the
        # current one is exhausted; sets current_char to None at EOF.
        self.current_position += 1
        if self.current_position < len(self.current_code_part):
            self.current_char = self.current_code_part[self.current_position]
        else:
            self.current_code_line += 1
            if self.current_code_line < self.code_lines:
                self.current_code_part = self.code[self.current_code_line]
                self.current_position = 0
                self.current_char = self.current_code_part[0]
            else:
                self.current_char = None

    def skip_rest_of_line(self):
        # Used for '#' comments: consume until the line number changes.
        start_with_line = self.current_code_line
        while self.current_char is not None and self.current_code_line == start_with_line:
            self.advance()

    def lookup(self, count):
        # Peek `count` characters ahead within the current line only;
        # returns None past the end of the line.
        peek_pos = self.current_position + count
        if peek_pos < len(self.current_code_part):
            return self.current_code_part[peek_pos]
        else:
            return None

    def skip_gaps(self):
        # Skip spaces (tabs are not treated as whitespace here).
        while self.current_char is not None and self.current_char == ' ':
            self.advance()

    def read_number(self):
        """Read an integer or real literal; a second dot is a fatal lex error."""
        result = ""
        was_dot = False
        while self.current_char is not None and (self.current_char.isdigit() or self.current_char == '.'):
            if self.current_char == '.':
                if was_dot:
                    print(">2 dots in number")
                    exit(0)
                was_dot = True
            result += self.current_char
            self.advance()
        if was_dot:
            return Token(Type.Number.Real, float(result))
        else:
            return Token(Type.Number.Integer, int(result))

    def read_word(self):
        """Read an identifier/keyword.

        '<', '>' and '::' are allowed inside so C++ template and scoped
        type names lex as a single Word token. Keywords are matched
        case-insensitively and returned upper-cased.
        """
        type = Type.Word
        result = ""
        while self.current_char is not None and (self.current_char.isalpha() or self.current_char.isdigit() or self.current_char == '_' or self.current_char == '<' or self.current_char == '>' or (self.current_char == ':' and self.lookup(1) == ':')):
            if (self.current_char == ':'):
                # Consume the first ':' of '::'; the second falls through below.
                result += self.current_char
                self.advance()
            result += self.current_char
            self.advance()

        if result.upper() in reserved_words:
            return Token(reserved_words[result.upper()], result.upper())

        return Token(type, result)

    def read_operation(self):
        """Greedily match the longest reserved symbol at the cursor.

        Returns an empty Token (type/value None) and consumes nothing when
        no symbol matches. NOTE(review): an unrecognized character therefore
        makes next_token() return empty tokens forever at that position —
        confirm inputs are pre-validated.
        """
        token = Token()
        operation = ""
        for i in range(self.MAX_OPERATION_SIZE):
            next_element = self.lookup(i)
            if next_element is None:
                break
            operation += next_element
            if operation in reserved_symbols.keys():
                token = Token(reserved_symbols[operation], operation)

        # Consume exactly as many characters as the matched symbol has.
        if token.value is not None:
            for i in range(len(token.value)):
                self.advance()

        return token

    def next_token(self):
        """Produce the next Token, skipping spaces and '#' comment lines."""
        self.skip_gaps()

        while self.current_char == '#':
            self.skip_rest_of_line()
            self.skip_gaps()

        if self.current_char is None:
            return Token(Type.Special.EOF, None)
        elif self.current_char.isdigit():
            return self.read_number()
        elif self.current_char.isalpha():
            return self.read_word()
        else:
            return self.read_operation()
||||
24
utils/compilers/ConnectionCompiler/Lexer/reserved_symbols.py
Normal file
24
utils/compilers/ConnectionCompiler/Lexer/reserved_symbols.py
Normal file
@@ -0,0 +1,24 @@
|
||||
from type_file import Type
|
||||
|
||||
# Punctuation and operator tokens recognized by Lexer.read_operation().
reserved_symbols = {
    '(': Type.Lang.LeftBracket,
    ')': Type.Lang.RightBracket,
    ';': Type.Lang.Semi,
    '.': Type.Lang.Dot,
    ',': Type.Lang.Comma,
    ':': Type.Lang.Colon,

    # Two-character "returns" arrow between request and reply messages.
    '=>': Type.Reserved.Return,

    '{': Type.Reserved.Begin,
    '}': Type.Reserved.End,
}

# Keyword tokens; the lexer matches these against the upper-cased word.
reserved_words = {
    'NAME': Type.Reserved.Name,
    'MAGIC': Type.Reserved.Magic,
    'KEYPROTECTED': Type.Reserved.KeyProtected,
}

# Token types that may appear as literal values.
available_var_types = [Type.Number.Integer,
                       Type.Number.Real, Type.Number.Boolean]
||||
140
utils/compilers/ConnectionCompiler/Parser/parser.py
Normal file
140
utils/compilers/ConnectionCompiler/Parser/parser.py
Normal file
@@ -0,0 +1,140 @@
|
||||
from Lexer.lexer import Lexer
|
||||
from token import Token
|
||||
from type_file import Type
|
||||
from connection import Connection
|
||||
|
||||
|
||||
class Parser:
    """Recursive-descent parser for .ipc decoder descriptions.

    Tokens are pulled lazily from the Lexer and buffered in `self.tokens`,
    which allows arbitrary lookahead via is_nth()/get_token_at().
    """

    def set_code_lines(self, code: [str]):
        # (Re)initialize all parser state over a fresh list of source lines.
        self.lexer = Lexer(code)
        self.token = Token()
        self.current_token_id = -1
        self.read_tokens = 0
        # NOTE(review): "rich" is presumably a typo for "reached"; name kept
        # since it is internal state.
        self.lexer_rich_the_end = 0
        self.tokens = []
        self.next_token()

    def next_token(self):
        # Advance `self.token` to the next buffered (or freshly lexed) token.
        self.current_token_id += 1
        self.token = self.get_token_at(self.current_token_id)

    def get_token_at(self, pos):
        # Lex forward until position `pos` is buffered or EOF was produced;
        # past EOF the last (EOF) token is returned repeatedly.
        if pos >= self.read_tokens and not self.lexer_rich_the_end:
            for i in range(pos - self.read_tokens + 1):
                self.tokens.append(self.lexer.next_token())
                self.read_tokens += 1
                if self.tokens[-1].type == Type.Special.EOF:
                    self.lexer_rich_the_end = True
                    break

        return self.tokens[min(pos, self.read_tokens - 1)]

    def is_nth(self, type_of_token, n):
        # True if the token `n` positions ahead has (one of) the given type(s).
        if isinstance(type_of_token, list):
            for type_of_cur_token in type_of_token:
                if self.get_token_at(self.current_token_id+n).type == type_of_cur_token:
                    return True
            return False
        return self.get_token_at(self.current_token_id+n).type == type_of_token

    def is_next(self, type_of_token):
        # True if the *current* token has (one of) the given type(s).
        if isinstance(type_of_token, list):
            for type_of_cur_token in type_of_token:
                if self.token.type == type_of_cur_token:
                    return True
            return False
        return self.token.type == type_of_token

    def must_next(self, type_of_token):
        # Hard expectation on the current token's type; exits on mismatch.
        if not self.is_next(type_of_token):
            print("{0} is not {1}".format(self.token, type_of_token))
            exit(1)

    def __init__(self):
        # Real initialization happens in set_code_lines().
        pass

    def eat_name(self):
        # NAME : <word>  — returns the decoder's class name.
        self.must_next(Type.Reserved.Name)
        self.next_token()
        self.must_next(Type.Lang.Colon)
        self.next_token()
        self.must_next(Type.Word)
        res = self.token.value
        self.next_token()
        return res

    def eat_protected(self):
        # Optional KEYPROTECTED flag; True when present (and consumed).
        if (self.is_next(Type.Reserved.KeyProtected)):
            self.next_token()
            return True
        return False

    def eat_magic(self):
        # MAGIC : <integer>  — returns the decoder's magic number.
        self.must_next(Type.Reserved.Magic)
        self.next_token()
        self.must_next(Type.Lang.Colon)
        self.next_token()
        self.must_next(Type.Number.Integer)
        res = self.token.value
        self.next_token()
        return res

    def eat_params(self):
        # ( type name [, type name]* )  — returns [[type, name], ...].
        params = []
        self.must_next(Type.Lang.LeftBracket)
        self.next_token()
        while not self.is_next(Type.Lang.RightBracket):
            self.must_next(Type.Word)
            typ = self.token.value
            self.next_token()

            self.must_next(Type.Word)
            nam = self.token.value
            self.next_token()

            params.append([typ, nam])

            if self.is_next(Type.Lang.Comma):
                self.must_next(Type.Lang.Comma)
                self.next_token()

        self.must_next(Type.Lang.RightBracket)
        self.next_token()
        return params

    def eat_message(self, decoder):
        # <word> ( params )  — registers the message, returns its name.
        self.must_next(Type.Word)
        msgname = self.token.value
        self.next_token()
        decoder.add_message(msgname, self.eat_params())
        return msgname

    def eat_function(self, decoder):
        # message [=> reply-message]  — registers the request/reply pair.
        ms1 = self.eat_message(decoder)
        ms2 = None

        if self.is_next(Type.Reserved.Return):
            self.must_next(Type.Reserved.Return)
            self.next_token()
            ms2 = self.eat_message(decoder)

        decoder.add_function(ms1, ms2)

    def eat_decoder(self):
        # { [KEYPROTECTED] NAME:<word> MAGIC:<int> function* }
        self.must_next(Type.Reserved.Begin)
        self.next_token()
        is_protected = self.eat_protected()
        decoder = Connection(self.eat_name(), self.eat_magic(), is_protected)
        while not self.is_next(Type.Reserved.End):
            self.eat_function(decoder)
        self.must_next(Type.Reserved.End)
        self.next_token()
        return decoder

    def parse(self):
        """Parse every decoder block; returns a list of Connection objects."""
        decoders = []
        while self.is_next(Type.Reserved.Begin):
            decoders.append(self.eat_decoder())
        print("connc: {0} decoders parsed!".format(len(decoders)))
        return decoders
|
||||
30
utils/compilers/ConnectionCompiler/__main__.py
Normal file
30
utils/compilers/ConnectionCompiler/__main__.py
Normal file
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from Parser.parser import Parser
|
||||
from Generator.generator import Generator
|
||||
from type_file import Type
|
||||
from token import Token
|
||||
import argparse
|
||||
|
||||
def run(input_f, output_f):
    """Compile one .ipc description: parse `input_f`, emit C++ into `output_f`."""
    code = []

    # BUGFIX: use a context manager so the input handle is closed even if
    # parsing below raises; previously the file object was leaked.
    with open(input_f, "r") as file:
        for line in file:
            line = line.replace('\n', '')
            # Blank lines carry no tokens — drop them before lexing.
            if len(line) > 0:
                code.append(line)

    parser = Parser()
    gen = Generator()
    parser.set_code_lines(code)
    gen.generate(output_f, parser.parse())
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: input .ipc file and output header path.
    parser = argparse.ArgumentParser()
    parser.add_argument('inf', type=str, help='Input file')
    parser.add_argument('outf', type=str, help='Output file')

    args = parser.parse_args()
    run(args.inf, args.outf)
|
||||
30
utils/compilers/ConnectionCompiler/connc
Normal file
30
utils/compilers/ConnectionCompiler/connc
Normal file
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
from Parser.parser import Parser
|
||||
from Generator.generator import Generator
|
||||
from type_file import Type
|
||||
from token import Token
|
||||
import argparse
|
||||
|
||||
def run(input_f, output_f):
    """Compile one .ipc description: parse `input_f`, emit C++ into `output_f`."""
    code = []

    # BUGFIX: use a context manager so the input handle is closed even if
    # parsing below raises; previously the file object was leaked.
    with open(input_f, "r") as file:
        for line in file:
            line = line.replace('\n', '')
            # Blank lines carry no tokens — drop them before lexing.
            if len(line) > 0:
                code.append(line)

    parser = Parser()
    gen = Generator()
    parser.set_code_lines(code)
    gen.generate(output_f, parser.parse())
|
||||
|
||||
|
||||
# NOTE(review): unlike __main__.py this script has no __main__ guard —
# it is installed as the `connc` executable and always runs on execution.
parser = argparse.ArgumentParser()
parser.add_argument('inf', type=str, help='Input file')
parser.add_argument('outf', type=str, help='Output file')

args = parser.parse_args()
run(args.inf, args.outf)
|
||||
21
utils/compilers/ConnectionCompiler/connection.py
Normal file
21
utils/compilers/ConnectionCompiler/connection.py
Normal file
@@ -0,0 +1,21 @@
|
||||
class Connection:
    """In-memory model of one connection: its messages and message-pair
    functions, keyed by name."""

    def __init__(self, name, magic, protected=False):
        self.name = name
        self.magic = magic
        self.protected = protected
        self.messages = {}   # message name -> parameter list
        self.functions = {}  # request message name -> reply message name (or None)

    def add_message(self, msg_name, params):
        """Register *msg_name* with *params*; exit(1) on a conflicting
        redefinition (identical redefinitions are allowed)."""
        if msg_name in self.messages:
            op = self.messages[msg_name]
            if params != op:
                # BUG FIX: this previously referenced the undefined name
                # `msg1_name`, raising NameError instead of reporting the
                # conflict.
                print("{0} has 2 different params".format(msg_name))
                exit(1)
        self.messages[msg_name] = params

    def add_function(self, msg1_name, msg2_name=None):
        """Pair request *msg1_name* with reply *msg2_name*; exit(1) if the
        request already has a function."""
        if msg1_name in self.functions:
            print("{0} has 2 functions".format(msg1_name))
            exit(1)
        self.functions[msg1_name] = msg2_name
|
||||
8
utils/compilers/ConnectionCompiler/token.py
Normal file
8
utils/compilers/ConnectionCompiler/token.py
Normal file
@@ -0,0 +1,8 @@
|
||||
class Token:
    """A single lexer token: a type tag paired with its literal value."""

    def __init__(self, type=None, value=None):
        # `type` intentionally mirrors the public API (it shadows the builtin).
        self.type = type
        self.value = value

    def __str__(self):
        return "Token({0}, {1})".format(self.type, self.value)
|
||||
39
utils/compilers/ConnectionCompiler/type_file.py
Normal file
39
utils/compilers/ConnectionCompiler/type_file.py
Normal file
@@ -0,0 +1,39 @@
|
||||
class Type:
    # Token-type tags used by the connection-language tokenizer/parser.

    Word = 'WORD'

    class Number:
        # Numeric literal categories.
        Integer = 'INTEGER'
        Real = 'REAL'
        Boolean = 'BOOLEAN'

    class BinaryOperation:
        # Arithmetic operator tokens.
        Plus = 'PLUS'
        Minus = 'MINUS'
        Mul = 'MUL'
        Div = 'DIV'

    class UnaryOperation:
        fill = 'FILL'
        Plus = 'PLUS'
        Minus = 'MINUS'
        Not = 'NOT'

    class Lang:
        # Structural / punctuation tokens.
        LeftBracket = 'LeftBracket'
        RightBracket = 'RightBracket'
        Semi = 'SEMI'
        Dot = 'DOT'
        Comma = 'COMMA'
        Colon = 'COLON'

    class Reserved:
        # Reserved keywords of the connection-description language.
        Begin = 'BEGIN'
        End = 'END'
        Return = 'RETURN'
        Name = 'NAME'
        Magic = 'MAGIC'
        KeyProtected = 'KEYP'

    class Special:
        # End-of-input marker.
        EOF = 'EOF'
|
||||
39
utils/compilers/DevTreeCompiler/ABI/Structs.py
Normal file
39
utils/compilers/DevTreeCompiler/ABI/Structs.py
Normal file
@@ -0,0 +1,39 @@
|
||||
from construct import *
|
||||
|
||||
# Magic string identifying an "odtr" revision-3 devtree blob.
DEVTREE_HEADER_SIGNATURE = "odtr3"

# Binary layout of the devtree header (little-endian, construct library).
DEVTREE_HEADER = Struct(
    "signature" / PaddedString(8, "ascii"),
    "revision" / Int32ul,
    "flags" / Int32ul,
    "entries_count" / Int32ul,
    "name_list_offset" / Int32ul   # byte offset of the name string table
)

# Entry flag bits.
DEVTREE_ENTRY_FLAGS_MMIO = (1 << 0)
# Device type codes.
DEVTREE_ENTRY_TYPE_IO = 0
DEVTREE_ENTRY_TYPE_FB = 1
DEVTREE_ENTRY_TYPE_UART = 2
DEVTREE_ENTRY_TYPE_RAM = 3
DEVTREE_ENTRY_TYPE_STORAGE = 4
DEVTREE_ENTRY_TYPE_BUS_CONTROLLER = 5
DEVTREE_ENTRY_TYPE_RTC = 6

# Currently flags maps to irq_flags_t in the kernel.
# Later we might need to enhance irq_flags_from_devtree() to use as translator.
DEVTREE_IRQ_FLAGS_EDGE_TRIGGER = (1 << 0)

# Binary layout of one device entry (little-endian).
DEVTREE_ENTRY = Struct(
    "type" / Int32ul,
    "flags" / Int32ul,
    "region_base" / Int64ul,
    "region_size" / Int64ul,
    "irq_lane" / Int32ul,
    "irq_flags" / Int32ul,
    "irq_priority" / Int32ul,
    "rel_name_offset" / Int32ul,   # offset of this entry's name in the name table
    "aux1" / Int64ul,
    "aux2" / Int64ul,
    "aux3" / Int64ul,
    "aux4" / Int64ul,
)
|
||||
47
utils/compilers/DevTreeCompiler/ABI/Translation.py
Normal file
47
utils/compilers/DevTreeCompiler/ABI/Translation.py
Normal file
@@ -0,0 +1,47 @@
|
||||
from ABI.Structs import *
|
||||
|
||||
|
||||
class Translator():
    """Translates textual devtree fields into their ABI binary values."""

    @staticmethod
    def entry_flag_translator(s):
        """Map one entry-flag token to its bitmask (0 for unknown tokens)."""
        translation = {
            "MMIO": DEVTREE_ENTRY_FLAGS_MMIO,
        }
        return translation.get(s, 0)

    @staticmethod
    def irq_flag_translator(s):
        """Map one IRQ-flag token to its bitmask (0 for unknown tokens)."""
        translation = {
            "EDGE_TRIGGER": DEVTREE_IRQ_FLAGS_EDGE_TRIGGER,
        }
        return translation.get(s, 0)

    @staticmethod
    def entry_type(s):
        """Map a device-type token to its numeric code (defaults to IO)."""
        translation = {
            "IO": DEVTREE_ENTRY_TYPE_IO,
            "FB": DEVTREE_ENTRY_TYPE_FB,
            "UART": DEVTREE_ENTRY_TYPE_UART,
            "RAM": DEVTREE_ENTRY_TYPE_RAM,
            "STORAGE": DEVTREE_ENTRY_TYPE_STORAGE,
            "BUS_CONTROLLER": DEVTREE_ENTRY_TYPE_BUS_CONTROLLER,
            "RTC": DEVTREE_ENTRY_TYPE_RTC
        }
        return translation.get(s, DEVTREE_ENTRY_TYPE_IO)

    @staticmethod
    def number(s):
        """Parse an integer from *s*, honoring 0x/0o/0b prefixes (base=0)."""
        return int(s, base=0)

    @staticmethod
    def flags(s, flagcb):
        """OR together the flag bits for every space-separated token in *s*.

        BUG FIX: the loop previously passed the whole string *s* to
        *flagcb* instead of the individual token, so any multi-flag string
        resolved to 0.
        """
        flags = 0x0
        for ent in s.split(" "):
            t = flagcb(ent)
            if t is not None:
                flags |= t
        return flags
|
||||
24
utils/compilers/DevTreeCompiler/DevTreeCompiler.py
Normal file
24
utils/compilers/DevTreeCompiler/DevTreeCompiler.py
Normal file
@@ -0,0 +1,24 @@
|
||||
from Parser.Parser import Parser
|
||||
from Generator.IRManager import IRManager
|
||||
from Generator.BinWriter import BinWriter
|
||||
from Generator.CWriter import CWriter
|
||||
from Generator.Compiler import Compiler
|
||||
import argparse
|
||||
|
||||
class DevTreeCompiler():
    """Facade wiring parser, IR manager, compiler and writers together."""

    @staticmethod
    def compile(input_f, output_f):
        """Compile the JSON devtree *input_f* into a binary blob at *output_f*."""
        pipeline = Compiler(IRManager(Parser(input_f)))
        BinWriter(output_f).write(pipeline.compile())

    @staticmethod
    def to_c_arr(input_f):
        """Compile *input_f* and dump the result as a C byte array on stdout."""
        pipeline = Compiler(IRManager(Parser(input_f)))
        CWriter().write(pipeline.compile())
|
||||
8
utils/compilers/DevTreeCompiler/Generator/BinWriter.py
Normal file
8
utils/compilers/DevTreeCompiler/Generator/BinWriter.py
Normal file
@@ -0,0 +1,8 @@
|
||||
class BinWriter():
    """Writes compiled devtree bytes to an output file."""

    def __init__(self, output_file):
        self.output_file = output_file  # destination path

    def write(self, data):
        """Write *data* (a bytes-like iterable of ints) to the output file.

        Uses a context manager so the handle is closed even if the write
        fails (the original leaked it on error).
        """
        with open(self.output_file, "wb") as binfile:
            binfile.write(bytes(data))
|
||||
9
utils/compilers/DevTreeCompiler/Generator/CWriter.py
Normal file
9
utils/compilers/DevTreeCompiler/Generator/CWriter.py
Normal file
@@ -0,0 +1,9 @@
|
||||
class CWriter():
    """Dumps compiled devtree bytes to stdout as a C uint8_t array."""

    def __init__(self):
        pass

    def write(self, data):
        """Print *data* as `static uint8_t _devtree_raw[] = {...};`."""
        print("static uint8_t _devtree_raw[] = {")
        body = ",".join(hex(value) for value in bytearray(data))
        if body:
            print(body, end=",")
        # Trailing 0x0 terminator closes the array.
        print("0x0};")
|
||||
110
utils/compilers/DevTreeCompiler/Generator/Compiler.py
Normal file
110
utils/compilers/DevTreeCompiler/Generator/Compiler.py
Normal file
@@ -0,0 +1,110 @@
|
||||
from Generator.IRManager import IRManager
|
||||
from ABI.Structs import *
|
||||
from ABI.Translation import *
|
||||
|
||||
|
||||
class Compiler():
    # Packs the IR device list into the final devtree blob, laid out as:
    # [DEVTREE_HEADER][DEVTREE_ENTRY * n][NUL-terminated name strings].
    __debug = False

    def __init__(self, irmng):
        # irmng: IRManager supplying the parsed device dictionaries.
        self.irmng = irmng

        self.res_binarr = bytearray()     # final assembled blob
        self.header_binarr = bytearray()  # packed header
        self.devs_binarr = bytearray()    # packed entry records
        self.names_binarr = bytearray()   # device-name string table

    def build_header(self):
        # NOTE: must run AFTER build_dev_list() — name_list_offset depends
        # on the packed size of the entry array.
        count_of_devs = len(self.irmng.device_list())

        result = {
            "signature": DEVTREE_HEADER_SIGNATURE,
            "revision": 1,
            "flags": 0,
            "entries_count": count_of_devs,
            "name_list_offset": DEVTREE_HEADER.sizeof() + len(self.devs_binarr),
        }

        self.header_binarr = DEVTREE_HEADER.build(result)

    def build_dev(self, dev):
        # Pack one device dict into a DEVTREE_ENTRY; fields absent from the
        # dict keep the zero defaults below.
        result = {
            "type": 0,
            "flags": 0,
            "region_base": 0,
            "region_size": 0,
            "irq_lane": 0,
            "irq_flags": 0,
            "irq_priority": 0,
            "rel_name_offset": len(self.names_binarr),
            "aux1": 0,
            "aux2": 0,
            "aux3": 0,
            "aux4": 0,
        }

        if "type" in dev:
            result["type"] = Translator.entry_type(dev["type"])

        if "flags" in dev:
            result["flags"] = Translator.flags(
                dev["flags"], Translator.entry_flag_translator)

        if "mem" in dev:
            devmem = dev["mem"]
            if "base" in devmem:
                result["region_base"] = Translator.number(devmem["base"])
            if "size" in devmem:
                result["region_size"] = Translator.number(devmem["size"])

        if "irq" in dev:
            devint = dev["irq"]
            if "lane" in devint:
                result["irq_lane"] = Translator.number(devint["lane"])
            if "flags" in devint:
                result["irq_flags"] = Translator.flags(
                    devint["flags"], Translator.irq_flag_translator)
            if "priority" in devint:
                result["irq_priority"] = Translator.number(devint["priority"])

        if "aux1" in dev:
            result["aux1"] = Translator.number(dev["aux1"])
        if "aux2" in dev:
            result["aux2"] = Translator.number(dev["aux2"])
        if "aux3" in dev:
            result["aux3"] = Translator.number(dev["aux3"])
        if "aux4" in dev:
            result["aux4"] = Translator.number(dev["aux4"])

        self.devs_binarr += DEVTREE_ENTRY.build(result)
        # Append the NUL-terminated name to the string table.
        # NOTE(review): map(ord, ...) assumes ASCII-only device names —
        # non-ASCII would produce values > 255; confirm the input contract.
        self.names_binarr += bytearray((map(ord,
                                            dev["name"]))) + bytearray([0])

    def build_dev_list(self):
        # Re-pack every device entry and its name from scratch.
        self.devs_binarr = bytearray()
        self.names_binarr = bytearray()
        for dev in self.irmng.device_list():
            self.build_dev(dev)

    def build_binarr(self):
        # Full rebuild: entries/names first, then the header (whose
        # name_list_offset depends on the entries), then concatenate.
        self.res_binarr = bytearray()
        self.header_binarr = bytearray()
        self.devs_binarr = bytearray()
        self.names_binarr = bytearray()

        self.build_dev_list()
        self.build_header()

        self.res_binarr = self.header_binarr + self.devs_binarr + self.names_binarr

        if self.__debug:
            print("Header", self.header_binarr)
            print("Devs", self.devs_binarr)
            print("Names", self.names_binarr)
            print("Res", self.res_binarr)

    def compile(self):
        # Lazily build and memoize the final blob.
        if len(self.res_binarr) == 0:
            self.build_binarr()

        return self.res_binarr
|
||||
12
utils/compilers/DevTreeCompiler/Generator/IRManager.py
Normal file
12
utils/compilers/DevTreeCompiler/Generator/IRManager.py
Normal file
@@ -0,0 +1,12 @@
|
||||
class IRManager():
    """Thin read-only view over the parsed devtree JSON."""

    def __init__(self, parser):
        self.parser = parser

    def platform_name(self):
        """Name of the target platform."""
        return self.parser.data()["name"]

    def device_count(self):
        """Number of device entries in the tree."""
        return len(self.device_list())

    def device_list(self):
        """The raw list of device dictionaries."""
        return self.parser.data()["devices"]
|
||||
20
utils/compilers/DevTreeCompiler/Parser/Parser.py
Normal file
20
utils/compilers/DevTreeCompiler/Parser/Parser.py
Normal file
@@ -0,0 +1,20 @@
|
||||
import json
|
||||
|
||||
class Parser():
    """Lazily loads and caches a JSON devtree description."""

    def __init__(self, input_file):
        self.decoded_data = None    # cache; populated on first data() call
        self.input_file = input_file

    def data(self):
        """Return the decoded JSON, reading the file once on first use.

        `with` guarantees the handle is closed even when json.load raises
        (the original leaked it on a parse error).
        """
        if self.decoded_data is None:
            with open(self.input_file) as f:
                self.decoded_data = json.load(f)

        return self.decoded_data
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
18
utils/compilers/DevTreeCompiler/__main__.py
Normal file
18
utils/compilers/DevTreeCompiler/__main__.py
Normal file
@@ -0,0 +1,18 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
from DevTreeCompiler import DevTreeCompiler
|
||||
import argparse
|
||||
|
||||
# CLI entry point for the devtree compiler.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('i', type=str, help='Input file')
    parser.add_argument('o', type=str, help='Output file')
    parser.add_argument('--dumpc', action='store_true',
                        help='Dumping C array')

    args = parser.parse_args()

    # --dumpc prints a C array to stdout; otherwise write the binary blob.
    if args.dumpc:
        DevTreeCompiler.to_c_arr(args.i)
    else:
        DevTreeCompiler.compile(args.i, args.o)
|
||||
74
utils/crypto/elfsign.py
Normal file
74
utils/crypto/elfsign.py
Normal file
@@ -0,0 +1,74 @@
|
||||
import sys
|
||||
import hashlib
|
||||
import argparse
|
||||
import subprocess
|
||||
import codecs
|
||||
from elftools.elf.elffile import ELFFile
|
||||
|
||||
# Test Key Set.
# NOTE(review): hard-coded 1024-bit RSA test keys — the private exponent is
# committed here, so these signatures provide no real security; test-only.
PRIVATE_D = 0x38df48893d28df263487987da506d0c56d5f817573bc091071c5cd798d78ba4c996f946a2d695b9b4428794c59500ba2e5b2ed383aa791b18f56fd90875010eddb98c37113f9717511a2845edbcc85e9c559f74d474c107767666f894357c1439217ce82d0181a58ce9a8ad75e3a37ccb3aeba5ba07dd2f8cb8b92ae2735e275
PUBLIC_N = 0xa65f3fc1e6e850b5c7174fb7c0b30f36507ad9a67e83abe707b176fb7f44230711ffe8291119d921e0e483024dc6e603a628996ec2c875069aef21f1b9e9cfbcd3f7ee6f4ecf1bc67d9d7239adef94596038b4b5833ee34b4ddad75b17ad8f8fbb2cd5149115602aa57aecdfdacbea8a7fe735c98784a4b1868986dced517e1d
PUBLIC_E = 0x10001

# CLI: elfsign.py <target> [--overwrite]
parser = argparse.ArgumentParser()
parser.add_argument('target', type=str, help='Target third_part package')
parser.add_argument('--overwrite', action='store_true',
                    help='Overwrites exisiting signature')
args = parser.parse_args()
|
||||
|
||||
|
||||
def shell(cmd, cwd=None):
    """Run *cmd* through the shell and return its stdout decoded as ASCII."""
    output = subprocess.check_output(cmd, shell=True, cwd=cwd)
    return output.decode("ascii")
|
||||
|
||||
|
||||
elffile_path = args.target      # ELF file to sign (from the CLI)
signfield_loc = 0               # file offset of the ._signature section
sha256_hash = hashlib.sha256()  # digest accumulated over PT_LOAD segments
|
||||
|
||||
def int_to_bytes(n, nsize=128):
    """Serialize a non-negative int *n* to little-endian bytes.

    The result is zero-padded to at least *nsize* bytes, or longer when
    *n* does not fit (matching the original loop's behavior).

    Replaces the manual divmod loop with int.to_bytes.  Negative input now
    raises OverflowError instead of looping forever as the old code did.
    """
    length = max(nsize, (n.bit_length() + 7) // 8)
    return bytearray(n.to_bytes(length, "little"))
|
||||
|
||||
with open(elffile_path, 'rb') as elffile:
    # Locate the dedicated ._signature section reserved by the linker.
    signature_section = None
    for section in ELFFile(elffile).iter_sections():
        if section.name == "._signature":
            signature_section = section
            signfield_loc = section['sh_offset']

    if signature_section is None:
        print("Elf file is not configured for signing.")
        exit(1)

    # Any non-zero byte means the file is already signed; without
    # --overwrite, print the existing signature and stop.
    if not args.overwrite:
        for byte in signature_section.data():
            if byte != 0:
                print("Signature:", ''.join('{:02x}'.format(x)
                                            for x in signature_section.data()[:128]))

                print("Elf file is already signed.")
                exit(0)

    signfield_loc = signature_section['sh_offset']

    # Hash every loadable segment — the bytes that end up in memory.
    for segment in ELFFile(elffile).iter_segments():
        seg_head = segment.header
        if seg_head.p_type == "PT_LOAD":
            sha256_hash.update(segment.data())


# "Textbook" RSA: sig = hash^d mod n (no padding scheme).
hash = int(codecs.encode(sha256_hash.digest(), 'hex'), 16)
signature = pow(hash, PRIVATE_D, PUBLIC_N)

# Patch the signature into the reserved section in place.
signature_file = open(elffile_path, "r+b")
signature_file.seek(signfield_loc)
signature_file.write(int_to_bytes(signature))
signature_file.close()
|
||||
BIN
utils/legacy/app_installer.exec
Normal file
BIN
utils/legacy/app_installer.exec
Normal file
Binary file not shown.
9
utils/legacy/config.py
Normal file
9
utils/legacy/config.py
Normal file
@@ -0,0 +1,9 @@
|
||||
# FAT16 image formatting parameters (consumed by fat16_formatter.py and
# copy_bin.py).  NOTE(review): 'SectorPerClustor'/'RootEntires' keys are
# misspelled but are runtime identifiers used across files — do not rename.
format_settings = {
    'BytesPerSector': 0x200, # 512
    'SectorPerClustor': 0x1, # 1
    'ReservedSectors': 200 , # 100 kb
    'NumberOfFATs': 0x2,
    'RootEntires': 0x200,
    'VolumeName': 'xOSHD',
    'filename': 'one.img'
}
|
||||
112
utils/legacy/copy_bin.py
Normal file
112
utils/legacy/copy_bin.py
Normal file
@@ -0,0 +1,112 @@
|
||||
from config import *
|
||||
|
||||
copyTo = 'one.img'                # disk image to patch
copyFrom = 'products/kernel.bin'  # kernel ELF to embed
kernelSize = 0                    # set by open_file(); recomputed later from section sizes
|
||||
|
||||
def open_file(path=None):
    """Read the kernel image into a list of byte values.

    Returns the file's bytes as ints followed by a trailing 0 — the
    original byte-at-a-time loop appended the empty EOF read as 0, and
    callers may rely on that sentinel, so it is preserved.  Also sets the
    global kernelSize to file size + 1, matching the original counter.

    Args:
        path: Optional source-file override; defaults to the module global
            ``copyFrom`` (backward compatible).
    """
    global kernelSize
    src = copyFrom if path is None else path
    # Single bulk read replaces the original O(n)-syscall 1-byte loop.
    with open(src, "rb") as f:
        data = f.read()
    kernelSize = len(data) + 1
    return list(data) + [0]
|
||||
|
||||
def open_app(fname):
    """Read *fname* and return its bytes as a list of ints plus a trailing 0.

    The trailing 0 reproduces the original loop's append of the empty EOF
    read (int.from_bytes(b"", 'little') == 0).  Bulk read replaces the
    original 1-byte-per-syscall loop.
    """
    with open(fname, "rb") as f:
        return list(f.read()) + [0]
|
||||
|
||||
def write_file(dd, withPos = 0):
    """Write byte values *dd* into the image *copyTo* at offset *withPos*.

    Batches the original's per-byte write calls into a single write; the
    resulting file contents are identical (bytes() rejects values outside
    0..255, as to_bytes(1, ...) did).
    """
    with open(copyTo, "rb+") as f:
        f.seek(withPos, 0)
        f.write(bytes(dd))
|
||||
|
||||
def append(res, plus, offset):
    """Overwrite res[offset:offset+len(plus)] with *plus*; returns *res*."""
    for idx, value in enumerate(plus):
        res[idx + offset] = value
    return res
|
||||
|
||||
def get_from(offset, len):
    """Decode *len* bytes of the global `dd` at *offset* as a little-endian int.

    Raises IndexError when the range runs past the end of `dd` (same as the
    original).  Note: the parameter name `len` shadows the builtin — kept
    for interface compatibility.
    """
    global dd
    value = 0
    weight = 1
    for i in range(len):
        value += dd[offset + i] * weight
        weight *= 256
    return value
|
||||
|
||||
# --- Script body: read the kernel ELF and splice it into the disk image ---
dd = open_file()

# ELF32 header fields (little-endian), read by hand.
e_shoff = get_from(0x20, 4)      # section header table offset
e_shentsize = get_from(0x2E, 2)  # size of one section header
e_shnum = get_from(0x30, 2)      # number of section headers
e_shstrndx = get_from(0x32, 2)   # index of the section-name string table
print("Start of the section header table ", e_shoff, e_shentsize, e_shnum, e_shstrndx)

text_size = 0
data_size = 0
bss_size = 0

# Walk the section headers, summing the sizes of the sections we load.
e_shoff_now = e_shoff
txt_offset = get_from(e_shoff + e_shstrndx * e_shentsize + 0x10, 4)
for i in range(e_shnum + 1):
    if i == e_shstrndx:
        continue
    # Resolve the NUL-terminated section name from the string table.
    name = ""
    sh_name = get_from(e_shoff_now + 0x00, 4)
    print(sh_name)
    while dd[txt_offset+sh_name] != 0:
        name += chr(dd[txt_offset+sh_name])
        sh_name+=1
    sh_type = get_from(e_shoff_now + 0x04, 4)
    sh_size = get_from(e_shoff_now + 0x14, 4)
    sh_entsize = get_from(e_shoff_now + 0x24, 4)
    if name == '.text':
        text_size += sh_size
    if name == '.rodata':
        text_size += sh_size
    if name == '.eh_frame':
        text_size += sh_size
    if name == '.data':
        data_size += sh_size
    if name == '.bss':
        bss_size += sh_size
    print(name, sh_type, sh_size, sh_entsize)
    e_shoff_now += e_shentsize


print("text: {0}, data: {1}, bss:{2}".format(text_size, data_size, bss_size))

# Kernel size in KB (rounded up), stored as a 16-bit little-endian prefix
# ahead of the kernel payload.
kernelSize = text_size + data_size + bss_size
kernelSize += 2 # Including offset of size
kernelSizeKb = (kernelSize + 1024 - 1) // 1024
kernelSizeKb1 = kernelSizeKb % 256
kernelSizeKb2 = kernelSizeKb // 256
print("Kernel Size (KB) ", kernelSizeKb)
d_kernel = [0, 0]
d_kernel[0] = kernelSizeKb1
d_kernel[1] = kernelSizeKb2

# NOTE(review): assumes the loadable payload starts at file offset 0x1000 —
# TODO confirm against the kernel link script.
d_kernel += dd[0x1000:(0x1000 + text_size)]
# print_g(d_kernel)

# Zero the reserved area, then write the size-prefixed kernel right after
# the boot sector (offset 512).
zeroes = [0] * (512 * (format_settings['ReservedSectors'] - 1))
write_file(zeroes, 512)
write_file(d_kernel, 512)
print(kernelSize)
|
||||
208
utils/legacy/fat16_formatter.py
Normal file
208
utils/legacy/fat16_formatter.py
Normal file
@@ -0,0 +1,208 @@
|
||||
# implemented in python to support all platforms
|
||||
# xOS
|
||||
# fat16 in xOS
|
||||
|
||||
from config import format_settings
|
||||
|
||||
# Image file to format, taken from the shared config.
filename = format_settings['filename']

# Mutable descriptor of the image; 'size' is filled in by open_file().
file_descriptor = {
    'name': filename,
    'size': 0, #(bytes)
}
|
||||
|
||||
def open_file():
    """Measure the image file and record its size in file_descriptor['size'].

    Replaces the original 1-byte-at-a-time read loop (one syscall per byte)
    with chunked reads; the computed size is identical.
    """
    size = 0
    with open(filename, "rb") as f:
        chunk = f.read(65536)
        while chunk:
            size += len(chunk)
            chunk = f.read(65536)

    file_descriptor['size'] = size
|
||||
|
||||
def write_file(dd, withPos = 0):
    """Write byte values *dd* into the image at offset *withPos*."""
    with open(filename, "rb+") as f:
        f.seek(withPos, 0)
        f.write(bytes(bytearray(dd)))
|
||||
|
||||
def merge(res, plus, offset):
    """Copy *plus* into *res* starting at *offset*; returns the mutated *res*."""
    for idx, value in enumerate(plus):
        res[offset + idx] = value
    return res
|
||||
|
||||
def test_id():
    """8-byte OEM-id field: 'xOSToP' padded with NUL bytes."""
    oem = 'xOSToP'
    out = bytearray(8)
    for pos, ch in enumerate(oem):
        out[pos] = ord(ch)
    return out
|
||||
|
||||
def bytes_per_sector():
    # 16-bit little-endian BPB BytesPerSector field.
    result = bytearray(2)
    result[0] = format_settings['BytesPerSector'] % 256
    result[1] = format_settings['BytesPerSector'] // 256
    return result


def sectors_per_cluster():
    # 8-bit BPB SectorsPerCluster field.
    result = bytearray(1)
    result[0] = format_settings['SectorPerClustor'] % 256
    return result


def number_of_fats():
    # 8-bit BPB NumberOfFATs field.
    result = bytearray(1)
    result[0] = format_settings['NumberOfFATs'] % 256
    return result


def root_entires():
    # 16-bit little-endian BPB RootEntries field.
    result = bytearray(2)
    result[0] = format_settings['RootEntires'] % 256
    result[1] = format_settings['RootEntires'] // 256
    return result


def load_kernel():
    # NOTE(review): byte-for-byte identical to reserved_sectors() below —
    # presumably the kernel load size equals the reserved area; confirm
    # before deduplicating.
    result = bytearray(2)
    result[0] = format_settings['ReservedSectors'] % 256
    result[1] = format_settings['ReservedSectors'] // 256
    return result


def reserved_sectors():
    # 16-bit little-endian BPB ReservedSectors field.
    result = bytearray(2)
    result[0] = format_settings['ReservedSectors'] % 256
    result[1] = format_settings['ReservedSectors'] // 256
    return result


def volume_label():
    # 11-byte volume label, space-padded (0x20).
    result = bytearray(11)
    for i in range(11):
        result[i] = 0x20
    textCode = format_settings['VolumeName']
    for (id,el) in enumerate(textCode):
        result[id] = ord(el)
    return result
|
||||
|
||||
def system_id():
    """8-byte system-id field: 'FAT16' padded with spaces (0x20)."""
    out = bytearray(b' ' * 8)
    for pos, ch in enumerate('FAT16'):
        out[pos] = ord(ch)
    return out
|
||||
|
||||
def header():
    # Assemble the 512-byte FAT16 boot sector / BIOS parameter block.
    result = bytearray(512)
    result = merge(result, test_id(), 0x3)
    result = merge(result, bytes_per_sector(), 0xB)
    result = merge(result, sectors_per_cluster(), 0xD)
    result = merge(result, reserved_sectors(), 0xE)
    result = merge(result, number_of_fats(), 0x10)
    result = merge(result, root_entires(), 0x11)
    result = merge(result, sectors_per_fat(), 0x16)
    result = merge(result, volume_label(), 0x2B)
    result = merge(result, system_id(), 0x36)
    # NOTE(review): a bootable sector conventionally ends 0x55 0xAA; this
    # writes 0x00 0x00 — confirm this is deliberate (non-bootable image).
    result[511] = 0x00
    result[510] = 0x00
    print_g(result)
    return result
|
||||
|
||||
def fat_size():
    # Compute how many 512-byte sectors one FAT needs so that every data
    # cluster in the image has a 16-bit FAT slot.
    if (format_settings['RootEntires'] * 32) % 512 != 0:
        print("RootEntires error [couldn't fit into sectors]")
        exit(0)
    root_dir_sectors = (format_settings['RootEntires'] * 32) // 512
    load_sectors = format_settings['ReservedSectors']
    data_sectors = file_descriptor['size'] // 512 - root_dir_sectors - load_sectors
    # Grow the FAT one sector at a time until its slots (512/2 entries per
    # sector, minus the 2 reserved entries) cover all remaining data sectors.
    for fat_sectors in range(1, 256):
        free_data_sectors = data_sectors - fat_sectors * format_settings['NumberOfFATs']
        covered_data_sectors = fat_sectors * 512 // 2 - 2
        print(free_data_sectors, covered_data_sectors)
        if free_data_sectors <= format_settings['SectorPerClustor'] * covered_data_sectors:
            return fat_sectors
    return 256
|
||||
|
||||
def sectors_per_fat():
    """16-bit little-endian BPB SectorsPerFAT field."""
    fsize = fat_size()
    out = bytearray(2)
    out[0] = fsize % 256
    out[1] = fsize // 256
    return out
|
||||
|
||||
def fat():
    # Build one FAT: reserved entries 0/1, then end-of-chain marks for the
    # clusters the FAT covers but the image does not actually contain.
    fsize = fat_size()
    print(fsize)
    result = bytearray(512 * fsize)
    # FAT[0] = media descriptor 0xF8 + fill; FAT[1] = end-of-chain 0xFFFF.
    result[0] = 0xf8
    result[1] = 0xff
    result[2] = 0xff
    result[3] = 0xff
    root_dir_sectors = (format_settings['RootEntires'] * 32) // 512
    load_sectors = format_settings['ReservedSectors']
    data_sectors = file_descriptor['size'] // 512 - root_dir_sectors - load_sectors
    free_data_sectors = data_sectors - fsize * format_settings['NumberOfFATs']
    covered_data_clusters = fsize * 512 // 2 - 2
    data_clusters = (free_data_sectors - format_settings['SectorPerClustor'] + 1) // format_settings['SectorPerClustor'] + 1
    unused_clusters = covered_data_clusters - data_clusters
    print(covered_data_clusters, data_clusters, unused_clusters)
    if unused_clusters < 0:
        print("Error with clusters")
        exit(0)
    # Mark the trailing, non-existent clusters 0xFFFF so the filesystem
    # never allocates them.
    for i in range(unused_clusters):
        result[512 * fsize - 2 * unused_clusters + 2 * i] = 0xff
        result[512 * fsize - 2 * unused_clusters + 2 * i + 1] = 0xff

    print_g(result)
    return result
|
||||
|
||||
def root_dir():
    """Zero-filled root-directory region: 32 bytes per directory entry."""
    return bytearray(format_settings['RootEntires'] * 32)
|
||||
|
||||
def print_gh(el):
    """One uppercase hex digit for 0..15 (same arithmetic beyond that range)."""
    if el >= 10:
        return chr(ord('A') + el - 10)
    return chr(ord('0') + el)
|
||||
|
||||
def print_g(result):
    """Hex-dump *result* to stdout, 16 bytes per line."""
    for idx, el in enumerate(result):
        if idx % 16 == 0:
            print()
        print(print_gh(el // 16), end="")
        print(print_gh(el % 16), end=" ")
|
||||
|
||||
|
||||
# Script body: lay out header, FATs, root directory and a zeroed data area
# in the image file, in that on-disk order.
if __name__ == "__main__":
    print("xOS Formatter")
    print("To change setting change config file")
    open_file()

    pos = 0

    print("writing header")
    print("Header starts at ", pos)
    # writing header
    header_e = header()
    write_file(header_e, pos)
    # Skip past the whole reserved area (header + kernel region).
    pos += format_settings['ReservedSectors'] * 512
    print("Kernel starts at ", 512)

    print("writing fats")
    print("Fats start at ", pos)
    # writing fats
    fat_e = fat()
    for i in range(format_settings['NumberOfFATs']):
        write_file(fat_e, pos)
        pos += len(fat_e)

    print("writing root_dir")
    print("Root dir starts at ", pos)
    # writing root_dir
    root_dir_e = root_dir()
    write_file(root_dir_e, pos)
    pos += len(root_dir_e)

    # Zero everything after the root directory (the data area).
    write_file(bytearray(file_descriptor['size'] - pos), pos)

    print("Data starts at ", pos)
    print("Data size is ", file_descriptor['size'] - pos)
    print("Data size is (Mb)", (file_descriptor['size'] - pos) / (1024 * 1024))
|
||||
33
utils/legacy/install_apps.py
Normal file
33
utils/legacy/install_apps.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import os

# File names never copied into the image.
ignore = [
    '.DS_Store'
]

apps_dir = 'home'
apps_dir_full = os.getcwd() + '/' + apps_dir
apps_dir_len = len(apps_dir)
installer = './utils/app_installer.exec'
print(apps_dir)

folders = []
files = []

# r = root, d = directories, f = files
for r, d, f in os.walk(apps_dir):
    for folder in d:
        folders.append((r[apps_dir_len:]+'/', folder))
    for file in f:
        files.append((r[apps_dir_len:]+'/', file))

# Recreate the directory tree inside the image first...
for path, fname in folders:
    cmd = "{0} mkdir {1} {2}".format(installer, path, fname)
    print(cmd)
    os.system(cmd)

# ...then copy every file except the ignored ones.
# NOTE(review): paths are interpolated into a shell command unescaped —
# names containing spaces or shell metacharacters will break (or inject).
for path, fname in files:
    print (apps_dir+'/'+fname)
    cmd = "{0} writefile {1} {2} {3}".format(installer, path, fname, apps_dir+'/'+fname)
    if not (fname in ignore):
        print(cmd)
        os.system(cmd)
|
||||
38
utils/legacy/interrgen.py
Normal file
38
utils/legacy/interrgen.py
Normal file
@@ -0,0 +1,38 @@
|
||||
# Emits x86 interrupt entry stubs (assembly text) on stdout; redirect the
# output into a .s file.

# Exception stubs, vectors 0..31.
for i in range(32):
    print("""
isr{0}:
cli
pusha
push ds
push es
push fs
push gs
push esp
push byte {0}
jmp isr_common_bottom
""".format(i)
    )


# Master-PIC IRQ stubs: vectors 32..39 are IRQ 0..7 ({0} = IRQ number,
# {1} = vector number pushed for the common handler).
for i in range(32, 40):
    print("""
irq{0}:
cli
pusha
push ds
push es
push fs
push gs
push esp
push byte {1}
jmp irq_master_common_bottom
""".format(i-32, i)
    )


# Vectors 40..47: label-only stubs.
# NOTE(review): the second format argument `i` is unused here (the template
# has only {0}) — looks like leftover from the irq loop above.
for i in range(40, 48):
    print("""
syscall{0}:

""".format(i-32, i)
    )
|
||||
3
utils/python_requirements.txt
Normal file
3
utils/python_requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
construct==2.10.67
|
||||
gitpython==3.1.27
|
||||
pyelftools==0.28
|
||||
16
utils/qprof/AddrResolver.py
Normal file
16
utils/qprof/AddrResolver.py
Normal file
@@ -0,0 +1,16 @@
|
||||
import subprocess
|
||||
import os
|
||||
|
||||
|
||||
class AddrResolver:
    """Resolves code addresses to function names via addr2line, with caching."""

    def __init__(self, path):
        self.path = path  # path to the ELF binary with symbols
        self.cache = {}   # addr -> resolved function name

    def get(self, addr):
        """Return the demangled function name for *addr* (cached).

        Invokes addr2line with an argument list and shell=False so the
        binary path cannot be interpreted by a shell (the original built a
        shell command line by string concatenation).
        """
        if addr not in self.cache:
            out = subprocess.check_output(
                ["i686-elf-addr2line", "--demangle", "-fsp",
                 "-e", self.path, hex(addr)])
            s = out.decode("ascii")
            # addr2line -fsp prints "func at file:line"; keep the function.
            self.cache[addr] = s.split(" ")[0]
        return self.cache[addr]
|
||||
61
utils/qprof/ProfNode.py
Normal file
61
utils/qprof/ProfNode.py
Normal file
@@ -0,0 +1,61 @@
|
||||
from AddrResolver import AddrResolver
|
||||
|
||||
|
||||
class Profiler:
    # Owns the address resolver for one profiled binary.

    def __init__(self, filename):
        # filename: path to the ELF with symbols, handed to AddrResolver.
        self.addr_resolver = AddrResolver(filename)

    def resolver(self):
        # The shared AddrResolver instance used by all ProfNodes.
        return self.addr_resolver
|
||||
|
||||
|
||||
class ProfNode:
    """One call-tree node: a program counter plus aggregated child frames."""

    def __init__(self, profiler, pc=0):
        self.profiler = profiler
        self.pc = pc
        self.name = "NOT SET"
        self.call_count = 0
        self.children = {}          # pc (or name after process_node) -> ProfNode
        self.children_by_name = {}  # kept for interface compatibility (unused here)

    def merge(self, node):
        """Fold *node*'s count and children into this node."""
        self.call_count += node.call_count
        self.children = {**self.children, **node.children}

    def add_stacktrace(self, stacktrace):
        """Record one stack trace (outermost frame last) under this node."""
        if len(stacktrace) == 0:
            return
        leaf_pc = stacktrace[-1]
        child = self.children.get(leaf_pc)
        if child is None:
            child = ProfNode(self.profiler, leaf_pc)
            self.children[leaf_pc] = child
        child.call_count += 1
        child.add_stacktrace(stacktrace[:-1])

    def process_node(self):
        """Collapse children that resolve to the same function name."""
        by_name = {}
        for pc, child in self.children.items():
            child.process_node()

            fname = self.profiler.resolver().get(pc)
            if fname not in by_name:
                named = ProfNode(self.profiler)
                named.name = fname
                by_name[fname] = named
            by_name[fname].merge(child)
        self.children = by_name

    def print(self, tabs=0):
        """Print this node's count and name, indented by *tabs*."""
        print('{} {:>4} {}'.format(" " * tabs, self.call_count, self.name))

    def trace(self, tabs=0):
        """Print this subtree, children ordered by descending call count."""
        self.print(tabs)
        ordered = sorted(self.children.values(),
                         key=lambda node: node.call_count, reverse=True)
        for child in ordered:
            child.trace(tabs + 1)
|
||||
39
utils/qprof/QConn.py
Normal file
39
utils/qprof/QConn.py
Normal file
@@ -0,0 +1,39 @@
|
||||
from QMP import QEMUMonitorProtocol
|
||||
import re
|
||||
|
||||
|
||||
class QConn:
    """Convenience wrapper over a QEMU monitor (QMP) connection."""

    def __init__(self, path):
        self.qconn = QEMUMonitorProtocol(path)
        self.qconn.connect()
        self.runs = True      # whether the guest is currently running
        self.regs_cache = ""  # cached "info registers" text while stopped

    def drop_cache(self):
        """Invalidate the cached register dump."""
        self.regs_cache = ""

    def stop(self):
        """Pause the guest; register reads become cacheable."""
        self.human_cmd("stop")
        self.runs = False
        self.drop_cache()

    def cont(self):
        """Resume the guest."""
        self.human_cmd("cont")
        self.runs = True

    def gpreg(self, name):
        """Return the hex-string value of general-purpose register *name*.

        Re-reads registers while the guest runs (or the cache is empty);
        returns "" when the register is absent from the dump.
        """
        if self.runs or self.regs_cache == "":
            self.regs_cache, err = self.human_cmd("info registers")
        name = name.upper()
        # FIX: raw string literal — "\w" in a normal string is an invalid
        # escape (DeprecationWarning today, SyntaxError in future Python).
        fd = re.search(r"{}=([\w]+)".format(name), self.regs_cache)
        if fd is None:
            return ""
        return fd.group(1)

    def human_cmd(self, line):
        """Run a human-monitor command; returns (text, 0) or (error_desc, 1)."""
        args = {}
        args["command-line"] = line
        resp = self.qconn.cmd("human-monitor-command", args)
        if "error" in resp:
            return (resp["error"]["desc"], 1)
        return (resp["return"], 0)
|
||||
256
utils/qprof/QMP.py
Normal file
256
utils/qprof/QMP.py
Normal file
@@ -0,0 +1,256 @@
|
||||
# QEMU Monitor Protocol Python class
|
||||
#
|
||||
# Copyright (C) 2009, 2010 Red Hat Inc.
|
||||
#
|
||||
# Authors:
|
||||
# Luiz Capitulino <lcapitulino@redhat.com>
|
||||
#
|
||||
# This work is licensed under the terms of the GNU GPL, version 2. See
|
||||
# the COPYING file in the top-level directory.
|
||||
|
||||
import json
|
||||
import errno
|
||||
import socket
|
||||
import logging
|
||||
|
||||
|
||||
class QMPError(Exception):
    # Base class for all QMP protocol errors.
    pass


class QMPConnectError(QMPError):
    # Raised when the QMP greeting is missing or malformed.
    pass


class QMPCapabilitiesError(QMPError):
    # Raised when capability negotiation with the monitor fails.
    pass


class QMPTimeoutError(QMPError):
    # Raised when waiting for a QMP event times out.
    pass
|
||||
|
||||
|
||||
class QEMUMonitorProtocol(object):
    """Talk to a QEMU instance over the QEMU Monitor Protocol (QMP).

    QMP is a line-delimited JSON protocol spoken over a TCP or Unix-domain
    stream socket.  Synchronous command responses and asynchronous events
    share the same stream; events are cached in an internal list and
    retrieved with pull_event()/get_events().
    """

    #: Logger object for debugging messages
    logger = logging.getLogger('QMP')
    #: Socket's error class
    error = socket.error
    #: Socket's timeout
    timeout = socket.timeout

    def __init__(self, address, server=False):
        """
        Create a QEMUMonitorProtocol class.

        @param address: QEMU address, can be either a unix socket path (string)
                        or a tuple in the form ( address, port ) for a TCP
                        connection
        @param server: server mode listens on the socket (bool)
        @raise socket.error on socket connection errors
        @note No connection is established, this is done by the connect() or
              accept() methods
        """
        self.__events = []
        self.__address = address
        self.__sock = self.__get_sock()
        self.__sockfile = None
        if server:
            self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.__sock.bind(self.__address)
            self.__sock.listen(1)

    def __get_sock(self):
        """Create the (not yet connected) stream socket for the address."""
        # A tuple address means (host, port) TCP; anything else is taken
        # as a Unix-domain socket path.
        if isinstance(self.__address, tuple):
            family = socket.AF_INET
        else:
            family = socket.AF_UNIX
        return socket.socket(family, socket.SOCK_STREAM)

    def __negotiate_capabilities(self):
        """Consume the server greeting and enter command mode.

        @return the QMP greeting dict
        @raise QMPConnectError if no valid greeting is received
        @raise QMPCapabilitiesError if capability negotiation fails
        """
        greeting = self.__json_read()
        if greeting is None or "QMP" not in greeting:
            raise QMPConnectError
        # Greeting seems ok, negotiate capabilities
        resp = self.cmd('qmp_capabilities')
        if "return" in resp:
            return greeting
        raise QMPCapabilitiesError

    def __json_read(self, only_event=False):
        """Read one JSON object from the stream.

        Asynchronous events encountered on the way are appended to the
        event cache; with only_event=True the first event is returned
        instead of being skipped.  Returns None on EOF.
        """
        while True:
            data = self.__sockfile.readline()
            if not data:
                return
            resp = json.loads(data)
            if 'event' in resp:
                self.logger.debug("<<< %s", resp)
                self.__events.append(resp)
                if not only_event:
                    continue
            return resp

    def __get_events(self, wait=False):
        """
        Check for new events in the stream and cache them in __events.

        @param wait (bool): block until an event is available.
        @param wait (float): If wait is a float, treat it as a timeout value.

        @raise QMPTimeoutError: If a timeout float is provided and the timeout
                                period elapses.
        @raise QMPConnectError: If wait is True but no events could be
                                retrieved or if some other error occurred.
        """

        # Check for new events regardless and pull them into the cache:
        self.__sock.setblocking(0)
        try:
            self.__json_read()
        except socket.error as err:
            # BUGFIX: exceptions are not indexable on Python 3, so the
            # original `err[0]` raised TypeError here.  Also re-raise
            # anything that is not the expected "no data available"
            # condition instead of silently swallowing real socket errors.
            if err.errno != errno.EAGAIN:
                raise
        self.__sock.setblocking(1)

        # Wait for new events, if needed.
        # if wait is 0.0, this means "no wait" and is also implicitly false.
        if not self.__events and wait:
            if isinstance(wait, float):
                self.__sock.settimeout(wait)
            try:
                ret = self.__json_read(only_event=True)
            except socket.timeout:
                raise QMPTimeoutError("Timeout waiting for event")
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit still propagate.
                raise QMPConnectError("Error while reading from socket")
            if ret is None:
                raise QMPConnectError("Error while reading from socket")
            self.__sock.settimeout(None)

    def connect(self, negotiate=True):
        """
        Connect to the QMP Monitor and perform capabilities negotiation.

        @return QMP greeting dict
        @raise socket.error on socket connection errors
        @raise QMPConnectError if the greeting is not received
        @raise QMPCapabilitiesError if fails to negotiate capabilities
        """
        self.__sock.connect(self.__address)
        self.__sockfile = self.__sock.makefile()
        if negotiate:
            return self.__negotiate_capabilities()

    def accept(self):
        """
        Await connection from QMP Monitor and perform capabilities negotiation.

        @return QMP greeting dict
        @raise socket.error on socket connection errors
        @raise QMPConnectError if the greeting is not received
        @raise QMPCapabilitiesError if fails to negotiate capabilities
        """
        self.__sock.settimeout(15)
        self.__sock, _ = self.__sock.accept()
        self.__sockfile = self.__sock.makefile()
        return self.__negotiate_capabilities()

    def cmd_obj(self, qmp_cmd):
        """
        Send a QMP command to the QMP Monitor.

        @param qmp_cmd: QMP command to be sent as a Python dict
        @return QMP response as a Python dict or None if the connection has
                been closed
        """
        self.logger.debug(">>> %s", qmp_cmd)
        try:
            self.__sock.sendall(json.dumps(qmp_cmd).encode('utf-8'))
        except socket.error as err:
            # BUGFIX: `err[0]` is a Python-2 idiom; use .errno.  A broken
            # pipe means the peer went away, which callers see as None.
            if err.errno == errno.EPIPE:
                return
            # Re-raise the original exception; the old
            # `raise socket.error(err)` discarded errno and traceback.
            raise
        resp = self.__json_read()
        self.logger.debug("<<< %s", resp)
        return resp

    def cmd(self, name, args=None, cmd_id=None):
        """
        Build a QMP command and send it to the QMP Monitor.

        @param name: command name (string)
        @param args: command arguments (dict)
        @param cmd_id: command id (dict, list, string or int)
        """
        qmp_cmd = {'execute': name}
        if args:
            qmp_cmd['arguments'] = args
        if cmd_id:
            qmp_cmd['id'] = cmd_id
        return self.cmd_obj(qmp_cmd)

    def command(self, cmd, **kwds):
        """
        Build and send a QMP command to the monitor, report errors if any
        """
        ret = self.cmd(cmd, kwds)
        if "error" in ret:
            raise Exception(ret['error']['desc'])
        return ret['return']

    def pull_event(self, wait=False):
        """
        Pulls a single event.

        @param wait (bool): block until an event is available.
        @param wait (float): If wait is a float, treat it as a timeout value.

        @raise QMPTimeoutError: If a timeout float is provided and the timeout
                                period elapses.
        @raise QMPConnectError: If wait is True but no events could be
                                retrieved or if some other error occurred.

        @return The first available QMP event, or None.
        """
        self.__get_events(wait)

        if self.__events:
            return self.__events.pop(0)
        return None

    def get_events(self, wait=False):
        """
        Get a list of available QMP events.

        @param wait (bool): block until an event is available.
        @param wait (float): If wait is a float, treat it as a timeout value.

        @raise QMPTimeoutError: If a timeout float is provided and the timeout
                                period elapses.
        @raise QMPConnectError: If wait is True but no events could be
                                retrieved or if some other error occurred.

        @return The list of available QMP events.
        """
        self.__get_events(wait)
        return self.__events

    def clear_events(self):
        """
        Clear current list of pending events.
        """
        self.__events = []

    def close(self):
        """Close the socket and its buffered file wrapper (if any)."""
        self.__sock.close()
        # BUGFIX: __sockfile stays None until connect()/accept(); closing
        # an unconnected instance used to raise AttributeError.
        if self.__sockfile is not None:
            self.__sockfile.close()

    def settimeout(self, timeout):
        """Set the socket timeout in seconds (None blocks forever)."""
        self.__sock.settimeout(timeout)

    def get_sock_fd(self):
        """Return the underlying socket's file descriptor."""
        return self.__sock.fileno()

    def is_scm_available(self):
        """Return True when the transport is a Unix-domain socket (and can
        therefore carry SCM_RIGHTS ancillary data)."""
        return self.__sock.family == socket.AF_UNIX
|
||||
97
utils/qprof/qprof
Normal file
97
utils/qprof/qprof
Normal file
@@ -0,0 +1,97 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
import os
|
||||
import code
|
||||
import argparse
|
||||
from time import sleep
|
||||
from ProfNode import ProfNode, Profiler
|
||||
from QConn import QConn
|
||||
|
||||
|
||||
# Command-line interface: a positional monitor socket path plus sampling
# parameters.
parser = argparse.ArgumentParser()
parser.add_argument("path", type=str, help="Monitor address (required)")
# BUGFIX: the help text says "(required)" but the option was optional, so a
# missing --symbols crashed later in os.path.exists(None); enforce it here.
parser.add_argument("--symbols", type=str, required=True,
                    help="Symbols (required)")
parser.add_argument("--duration", type=int, default=10,
                    help="Duration (default=10)")
parser.add_argument("--frequency", type=float, default=0.05,
                    help="Frequency (default=0.05)")
parser.add_argument("--nointeractive", action="store_true",
                    help="Not run interactive mode")
args = parser.parse_args()

if not os.path.exists(args.symbols):
    # BUGFIX: the value was passed as a second print() argument, leaving
    # the "{}" placeholder unexpanded; format the message instead.
    print("Can't open symbols: {}".format(args.symbols))
    exit(1)

if not os.path.exists(args.path):
    print("Can't open qemu monitor socket: {}".format(args.path))
    exit(1)


qconn = QConn(args.path)
|
||||
|
||||
|
||||
def get_stacktrace():
    """Sample the guest's current call stack.

    Stops the guest, reads EIP/ESP/EBP plus one page of stack memory via
    the monitor, resumes the guest, then walks the EBP frame-pointer
    chain offline.  Returns a list of return addresses (innermost first),
    or [] if the stack memory could not be read.
    """
    PAGE_SIZE = 0x1000

    stacktrace = []

    qconn.stop()
    ip = int(qconn.gpreg("eip"), 16)
    sp = int(qconn.gpreg("esp"), 16)
    bp = int(qconn.gpreg("ebp"), 16)
    stacktrace.append(ip)

    # Dump the whole page containing the stack pointer in one monitor
    # command (PAGE_SIZE // 4 words).
    bottomsp = sp & ~(PAGE_SIZE - 1)
    memdata, err = qconn.human_cmd(
        "x/{}x ".format(PAGE_SIZE // 4) + hex(bottomsp))
    if err:
        # BUGFIX: resume the guest before bailing out -- the original
        # early return left the guest stopped after a failed memory dump.
        qconn.cont()
        return []

    qconn.cont()

    # Parse the "addr: w0 w1 w2 w3" dump lines into an address->word map.
    lines = memdata.split("\r\n")
    memmap = {}
    for i in lines:
        data = i.split(" ")
        if (len(data) != 5):
            continue
        addr = int(data[0][:-1], 16)
        memmap[addr] = int(data[1], 0)
        memmap[addr+4] = int(data[2], 0)
        memmap[addr+8] = int(data[3], 0)
        memmap[addr+12] = int(data[4], 0)

    # Walk the frame-pointer chain: [ebp+4] holds the return address,
    # [ebp] the caller's frame pointer.  `visited` guards against loops.
    visited = set()
    while True:
        pc = memmap.get(bp + 4)
        if pc is not None:
            stacktrace.append(pc)
        visited.add(bp)
        bp = memmap.get(bp, None)
        if bp is None:
            break
        if bp in visited:
            break

    return stacktrace
||||
|
||||
|
||||
# Build the profile tree; every sampled stack trace is merged into `root`.
profiler = Profiler(args.symbols)
root = ProfNode(profiler)
root.name = "root"

# Sample the guest roughly every `frequency` seconds for `duration` seconds.
runs = int(args.duration / args.frequency)
for i in range(runs):
    root.add_stacktrace(get_stacktrace())
    sleep(args.frequency)


root.process_node()

# Either drop into a REPL with the profile data in scope, or just dump it.
if not args.nointeractive:
    print("Entering interactive mode, use root object to access profile data.")
    print(" * root.trace() - prints trace")
    code.interact(local=locals())
else:
    root.trace()
||||
75
utils/test/bench.py
Normal file
75
utils/test/bench.py
Normal file
@@ -0,0 +1,75 @@
|
||||
import subprocess
|
||||
import sys
|
||||
import os
|
||||
from tabulate import tabulate
|
||||
from termcolor import colored
|
||||
import signal
|
||||
|
||||
# Architecture under test (e.g. "x86", "arm32"); selects the expected
# baseline table below.
target_arch = sys.argv[1]

# Run the tester in its own process group so the whole tree can be killed
# once "[BENCH END]" is seen.
process = subprocess.Popen(
    "./run_tester.sh", stdout=subprocess.PIPE, preexec_fn=os.setpgrp)
string = ""

# Accumulated duration and sample count per benchmark name, filled in by
# process_string() and averaged by print_results().
sum_of_benchs = {}
count_of_benchs = {}

# For github CI: expected average durations (usec) per benchmark.
expected_benchmark_results = {
    "x86": {
        "FORK": 320000,
        "PNG LOADER": 1180000
    },
    "arm32": {
        "FORK": 954667,
        "PNG LOADER": 5176000
    },
}
||||
|
||||
|
||||
def print_results():
    """Print a table comparing measured benchmark averages against the
    expected baselines and exit(1) on a performance drop above 50%."""
    print(colored("Bench results:", color="white", attrs=["bold"]))
    res = []
    # Worst (most negative) percentage difference seen so far; 0.0 means
    # no benchmark was slower than expected.
    mper = 0.0
    for key, value in sum_of_benchs.items():
        # Average duration over all samples of this benchmark.
        new_val = int(value / count_of_benchs[key])
        # Positive percent = faster than the baseline, negative = slower.
        percent = (1 - new_val /
                   expected_benchmark_results[target_arch][key]) * 100
        res.append([key, expected_benchmark_results[target_arch][key],
                    new_val, "{:.2f}%".format(percent)])
        mper = min(mper, percent)

    data = tabulate(
        res, headers=['Test', 'Expected ({0})'.format(target_arch), 'Got', 'Diff'], tablefmt='orgtbl')
    print(data)
    if (mper < -50):
        print(colored("Crashing: too big performance drop ({0}%)!!!".format(
            mper), color="red", attrs=["bold"]))
        exit(1)
||||
|
||||
|
||||
def process_string(string):
    """Accumulate one line of tester output into the benchmark totals.

    The slicing below implies benchmark lines shaped like
    "[BENCH][NAME] 12345 (usec)" -- TODO confirm against the tester's
    actual output.  "[BENCH END]" stops the tester and prints the report.
    """
    if (string.startswith("[BENCH]")):
        # Value sits between "] " and " (usec)"; the name between the
        # 8-char "[BENCH][" prefix and the closing bracket.
        end_of_date = string.find(" (usec)")
        start_of_date = string.find("] ") + 2
        pr = string[8:start_of_date - 2]
        it = int(string[start_of_date:end_of_date])
        if count_of_benchs.get(pr, None) is None:
            # First sample of this benchmark: initialize its counters.
            sum_of_benchs[pr] = 0
            count_of_benchs[pr] = 0

        sum_of_benchs[pr] += it
        count_of_benchs[pr] += 1

    if (string.startswith("[BENCH END]")):
        # Tear down the whole tester process group, then report.
        os.killpg(os.getpgid(process.pid), signal.SIGTERM)
        print_results()
||||
|
||||
|
||||
# Consume the tester's stdout byte by byte, assembling lines manually so
# each completed line is processed as soon as it appears.
for c in iter(lambda: process.stdout.read(1), b''):
    letter = c.decode()
    if letter == "\n":
        process_string(string)
        string = ""
    else:
        string += letter
|
||||
57
utils/test/test.py
Normal file
57
utils/test/test.py
Normal file
@@ -0,0 +1,57 @@
|
||||
import subprocess
|
||||
import sys
|
||||
import os
|
||||
from termcolor import colored
|
||||
import signal
|
||||
|
||||
# Optional first CLI argument; "verbose" echoes every tester line.
flag = ""
if len(sys.argv) >= 2:
    flag = sys.argv[1]

# Run the tester in its own process group so the whole tree can be killed
# when the run finishes or fails.
process = subprocess.Popen(
    "./run_tester.sh", stdout=subprocess.PIPE, preexec_fn=os.setpgrp)
string = ""


# True while a KASAN error report is being echoed verbatim.
dumping_kasan = False
|
||||
|
||||
def process_string(string):
    """Dispatch on one line of tester output.

    Recognized prefixes: [OK], [MSG], [ALL TESTS PASSED], [FAILED] and
    the KASAN report delimiters.  Kills the tester's process group on
    final success, on failure, and after a complete KASAN report.
    """
    global dumping_kasan

    # Echo everything in verbose mode, and always echo while a KASAN
    # report is being dumped.
    if flag == "verbose" or dumping_kasan:
        print(string)

    if (string.startswith("[OK]")):
        # Test names encode "/" as "$"; undo that for display.
        print(colored("ok ", color="green", attrs=[
              "bold"]), string.replace("$", "/")[5:])

    if (string.startswith("[MSG]")):
        print(string[6:])

    if (string.startswith("[ALL TESTS PASSED]")):
        print(colored(string, color="green", attrs=["bold"]))
        os.killpg(os.getpgid(process.pid), signal.SIGTERM)

    if (string.startswith("[FAILED]")):
        print(colored("failed ", color="red", attrs=[
              "bold"]), string.replace("$", "/")[9:])
        os.killpg(os.getpgid(process.pid), signal.SIGTERM)
        exit(1)

    if (string.startswith("======== KASAN ERROR ========")):
        # Start of a KASAN report: echo it until the closing delimiter.
        dumping_kasan = True
        print(colored(string, color="red", attrs=["bold"]))

    if (string.startswith("=============================")):
        # End of the KASAN report: stop echoing and kill the tester.
        dumping_kasan = False
        os.killpg(os.getpgid(process.pid), signal.SIGTERM)
||||
|
||||
|
||||
# Consume the tester's stdout byte by byte, assembling lines manually so
# each completed line is processed as soon as it appears.
for c in iter(lambda: process.stdout.read(1), b''):
    letter = c.decode()
    if letter == "\n":
        process_string(string)
        string = ""
    else:
        string += letter
|
||||
Reference in New Issue
Block a user