You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
406 lines
8.8 KiB
406 lines
8.8 KiB
#include <assert.h>
|
|
#include <assert.h>
|
|
#include <math.h>
|
|
|
|
#include "types.h"
|
|
#include "sandbox_context_cache.h"
|
|
|
|
/* This file contains the stub functions that the aWsm compiler expects
|
|
* This corresponds to awsm/src/codegen/runtime_stubs.rs
|
|
* This should be linked with the *.bc file generated by aWsm in order to compile a module as a *.so
|
|
*/
|
|
|
|
extern thread_local struct sandbox_context_cache local_sandbox_context_cache;
|
|
|
|
/**
 * Stub the aWsm compiler emits calls to for copying a data segment into
 * the sandbox's linear memory.
 * @param offset byte offset into linear memory where the region is written
 * @param region_size number of bytes to copy
 * @param region source bytes (VLA-typed parameter of region_size bytes)
 */
EXPORT void
initialize_region(uint32_t offset, uint32_t region_size, uint8_t region[region_size])
{
	wasm_linear_memory_initialize_region(local_sandbox_context_cache.memory, offset, region_size, region);
}
|
|
|
|
/**
 * Stub implementing the WebAssembly memory.size instruction.
 * @return the current size of the sandbox's linear memory in Wasm pages
 */
EXPORT uint32_t
instruction_memory_size()
{
	return wasm_linear_memory_get_page_count(local_sandbox_context_cache.memory);
}
|
|
|
|
/**
|
|
* @brief Stub that implements the WebAssembly memory.grow instruction
|
|
*
|
|
* @param count number of pages to grow the WebAssembly linear memory by
|
|
* @return The previous size of the linear memory in pages or -1 if enough memory cannot be allocated
|
|
*/
|
|
EXPORT int32_t
instruction_memory_grow(uint32_t count)
{
	/* Snapshot the current size in pages first — per memory.grow semantics
	 * this is the value returned on success. */
	int rc = local_sandbox_context_cache.memory->size / WASM_PAGE_SIZE;

	/* Return -1 if we've hit the linear memory max */
	if (unlikely(wasm_linear_memory_expand(local_sandbox_context_cache.memory, WASM_PAGE_SIZE * count) == -1))
		return -1;

#ifdef LOG_SANDBOX_MEMORY_PROFILE
	// Cache the runtime of the first N page allocations
	// NOTE(review): `sandbox` is not declared anywhere in this function;
	// this profiling path presumably only compiles in a context where a
	// `sandbox` symbol is in scope — verify before enabling the flag.
	for (int i = 0; i < count; i++) {
		if (likely(sandbox->timestamp_of.page_allocations_size < SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT)) {
			sandbox->timestamp_of.page_allocations[sandbox->timestamp_of.page_allocations_size++] =
			  sandbox->duration_of_state.running
			  + (uint32_t)(__getcycles() - sandbox->timestamp_of.last_state_change);
		}
	}
#endif

	return rc;
}
|
|
|
|
/* Loads a 32-bit float from the sandbox's linear memory at `offset`. */
EXPORT float
get_f32(uint32_t offset)
{
	return wasm_linear_memory_get_float(local_sandbox_context_cache.memory, offset);
}
|
|
|
|
/* Stores a 32-bit float into the sandbox's linear memory at `offset`. */
EXPORT void
set_f32(uint32_t offset, float v)
{
	wasm_linear_memory_set_float(local_sandbox_context_cache.memory, offset, v);
}
|
|
|
|
/* Loads a 64-bit float from the sandbox's linear memory at `offset`. */
EXPORT double
get_f64(uint32_t offset)
{
	return wasm_linear_memory_get_double(local_sandbox_context_cache.memory, offset);
}
|
|
|
|
/* Stores a 64-bit float into the sandbox's linear memory at `offset`. */
EXPORT void
set_f64(uint32_t offset, double v)
{
	wasm_linear_memory_set_double(local_sandbox_context_cache.memory, offset, v);
}
|
|
|
|
/* Loads an 8-bit integer from the sandbox's linear memory at `offset`. */
EXPORT int8_t
get_i8(uint32_t offset)
{
	return wasm_linear_memory_get_int8(local_sandbox_context_cache.memory, offset);
}
|
|
|
|
/* Stores an 8-bit integer into the sandbox's linear memory at `offset`. */
EXPORT void
set_i8(uint32_t offset, int8_t v)
{
	wasm_linear_memory_set_int8(local_sandbox_context_cache.memory, offset, v);
}
|
|
|
|
/* Loads a 16-bit integer from the sandbox's linear memory at `offset`. */
EXPORT int16_t
get_i16(uint32_t offset)
{
	return wasm_linear_memory_get_int16(local_sandbox_context_cache.memory, offset);
}
|
|
|
|
/* Stores a 16-bit integer into the sandbox's linear memory at `offset`. */
EXPORT void
set_i16(uint32_t offset, int16_t v)
{
	wasm_linear_memory_set_int16(local_sandbox_context_cache.memory, offset, v);
}
|
|
|
|
/* Loads a 32-bit integer from the sandbox's linear memory at `offset`. */
EXPORT int32_t
get_i32(uint32_t offset)
{
	return wasm_linear_memory_get_int32(local_sandbox_context_cache.memory, offset);
}
|
|
|
|
/* Stores a 32-bit integer into the sandbox's linear memory at `offset`. */
EXPORT void
set_i32(uint32_t offset, int32_t v)
{
	wasm_linear_memory_set_int32(local_sandbox_context_cache.memory, offset, v);
}
|
|
|
|
/* Loads a 64-bit integer from the sandbox's linear memory at `offset`. */
EXPORT int64_t
get_i64(uint32_t offset)
{
	return wasm_linear_memory_get_int64(local_sandbox_context_cache.memory, offset);
}
|
|
|
|
/* Stores a 64-bit integer into the sandbox's linear memory at `offset`. */
EXPORT void
set_i64(uint32_t offset, int64_t v)
{
	wasm_linear_memory_set_int64(local_sandbox_context_cache.memory, offset, v);
}
|
|
|
|
EXPORT void
|
|
add_function_to_table(uint32_t idx, uint32_t type_id, char *pointer)
|
|
{
|
|
assert(idx < INDIRECT_TABLE_SIZE);
|
|
assert(local_sandbox_context_cache.module_indirect_table != NULL);
|
|
|
|
/* TODO: atomic for multiple concurrent invocations? Issue #97 */
|
|
if (local_sandbox_context_cache.module_indirect_table[idx].type_id == type_id
|
|
&& local_sandbox_context_cache.module_indirect_table[idx].func_pointer == pointer)
|
|
return;
|
|
|
|
local_sandbox_context_cache.module_indirect_table[idx] = (struct indirect_table_entry){
|
|
.type_id = type_id, .func_pointer = pointer
|
|
};
|
|
}
|
|
|
|
/*
|
|
* Table handling functionality
|
|
* This was moved from compiletime in order to place the
|
|
* function in the callstack in GDB. It can be moved back
|
|
* to runtime/compiletime/memory/64bit_nix.c to remove the
|
|
* additional function call
|
|
*/
|
|
char *
|
|
get_function_from_table(uint32_t idx, uint32_t type_id)
|
|
{
|
|
#ifdef LOG_FUNCTION_TABLE
|
|
fprintf(stderr, "get_function_from_table(idx: %u, type_id: %u)\n", idx, type_id);
|
|
fprintf(stderr, "indirect_table_size: %u\n", INDIRECT_TABLE_SIZE);
|
|
#endif
|
|
assert(idx < INDIRECT_TABLE_SIZE);
|
|
|
|
struct indirect_table_entry f = local_sandbox_context_cache.module_indirect_table[idx];
|
|
#ifdef LOG_FUNCTION_TABLE
|
|
fprintf(stderr, "assumed type: %u, type in table: %u\n", type_id, f.type_id);
|
|
#endif
|
|
// FIXME: Commented out function type check because of gocr
|
|
// assert(f.type_id == type_id);
|
|
|
|
assert(f.func_pointer != NULL);
|
|
|
|
return f.func_pointer;
|
|
}
|
|
|
|
|
|
/* Reads a 32-bit Wasm global; globals here are backed by linear memory,
 * so this delegates to the plain i32 load. */
EXPORT int32_t
get_global_i32(uint32_t offset)
{
	return get_i32(offset);
}
|
|
|
|
/* Writes a 32-bit Wasm global; delegates to the plain i32 store. */
EXPORT void
set_global_i32(uint32_t offset, int32_t v)
{
	set_i32(offset, v);
}
|
|
|
|
/* Reads a 64-bit Wasm global; delegates to the plain i64 load. */
EXPORT int64_t
get_global_i64(uint32_t offset)
{
	return get_i64(offset);
}
|
|
|
|
/* Writes a 64-bit Wasm global; delegates to the plain i64 store. */
EXPORT void
set_global_i64(uint32_t offset, int64_t v)
{
	set_i64(offset, v);
}
|
|
|
|
/* Bits per byte. CHAR_BIT is a standard macro from <limits.h>; guard the
 * definition so this file does not redefine it if limits.h is ever pulled
 * in through one of the headers above. */
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif
|
|
|
|
// TODO: Throughout here we use `assert` for error conditions, which isn't optimal
|
|
// Instead we should use `unlikely` branches to a single trapping function (which should optimize better)
|
|
// The below functions are for implementing WASM instructions
|
|
|
|
// ROTL and ROTR helper functions
|
|
/* Wasm i32.rotl: rotate `n` left by `c_u32` bits.
 * The rotate count is reduced modulo the 32-bit width (a single mask, since
 * the width is a power of two), so no assert is required. Shifting by
 * (-shift & mask) instead of (32 - shift) avoids an undefined full-width
 * shift when shift == 0. */
INLINE uint32_t
rotl_u32(uint32_t n, uint32_t c_u32)
{
	const uint32_t mask  = CHAR_BIT * sizeof(n) - 1; /* 31 */
	const uint32_t shift = c_u32 & mask;

	return (n << shift) | (n >> (-shift & mask));
}
|
|
|
|
/* Wasm i32.rotr: rotate `n` right by `c_u32` bits.
 * Count is masked to the 32-bit width; (-shift & mask) keeps the
 * complementary shift in range even when shift == 0. */
INLINE uint32_t
rotr_u32(uint32_t n, uint32_t c_u32)
{
	const uint32_t mask  = CHAR_BIT * sizeof(n) - 1; /* 31 */
	const uint32_t shift = c_u32 & mask;

	return (n >> shift) | (n << (-shift & mask));
}
|
|
|
|
/* Wasm i64.rotl: rotate `n` left by `c_u64` bits.
 * Count is masked to the 64-bit width; (-shift & mask) keeps the
 * complementary shift in range even when shift == 0. */
INLINE uint64_t
rotl_u64(uint64_t n, uint64_t c_u64)
{
	const uint64_t mask  = CHAR_BIT * sizeof(n) - 1; /* 63 */
	const uint64_t shift = c_u64 & mask;

	return (n << shift) | (n >> (-shift & mask));
}
|
|
|
|
/* Wasm i64.rotr: rotate `n` right by `c_u64` bits.
 * Count is masked to the 64-bit width; (-shift & mask) keeps the
 * complementary shift in range even when shift == 0. */
INLINE uint64_t
rotr_u64(uint64_t n, uint64_t c_u64)
{
	const uint64_t mask  = CHAR_BIT * sizeof(n) - 1; /* 63 */
	const uint64_t shift = c_u64 & mask;

	return (n >> shift) | (n << (-shift & mask));
}
|
|
|
|
// Now safe division and remainder
|
|
/* Wasm i32.div_u. Division by zero must never reach the hardware divide;
 * it is caught by the assert (a Wasm trap in a hardened build). */
INLINE uint32_t
u32_div(uint32_t a, uint32_t b)
{
	assert(b);

	const uint32_t quotient = a / b;
	return quotient;
}
|
|
|
|
/* Wasm i32.rem_u. A zero divisor is asserted against before computing. */
INLINE uint32_t
u32_rem(uint32_t a, uint32_t b)
{
	assert(b);

	const uint32_t remainder = a % b;
	return remainder;
}
|
|
|
|
/* Wasm i32.div_s. Both trapping cases — a zero divisor and INT32_MIN / -1
 * (the one quotient that overflows) — are asserted against. */
INLINE int32_t
i32_div(int32_t a, int32_t b)
{
	assert(b && (a != INT32_MIN || b != -1));

	const int32_t quotient = a / b;
	return quotient;
}
|
|
|
|
/**
 * Wasm i32.rem_s.
 * The Wasm spec defines INT32_MIN rem -1 as 0 (only division traps on that
 * pair), but in C the expression INT32_MIN % -1 is undefined behavior, so
 * the old assert wrongly aborted on a spec-valid input. Handle the case
 * explicitly; a zero divisor remains a trap (asserted here).
 */
INLINE int32_t
i32_rem(int32_t a, int32_t b)
{
	assert(b);
	if (a == INT32_MIN && b == -1) return 0; /* spec-defined result; UB in C */
	return a % b;
}
|
|
|
|
/* Wasm i64.div_u. A zero divisor is asserted against before computing. */
INLINE uint64_t
u64_div(uint64_t a, uint64_t b)
{
	assert(b);

	const uint64_t quotient = a / b;
	return quotient;
}
|
|
|
|
/* Wasm i64.rem_u. A zero divisor is asserted against before computing. */
INLINE uint64_t
u64_rem(uint64_t a, uint64_t b)
{
	assert(b);

	const uint64_t remainder = a % b;
	return remainder;
}
|
|
|
|
/* Wasm i64.div_s. Both trapping cases — a zero divisor and INT64_MIN / -1
 * (the one quotient that overflows) — are asserted against. */
INLINE int64_t
i64_div(int64_t a, int64_t b)
{
	assert(b && (a != INT64_MIN || b != -1));

	const int64_t quotient = a / b;
	return quotient;
}
|
|
|
|
/**
 * Wasm i64.rem_s.
 * The Wasm spec defines INT64_MIN rem -1 as 0 (only division traps on that
 * pair), but in C the expression INT64_MIN % -1 is undefined behavior, so
 * the old assert wrongly aborted on a spec-valid input. Handle the case
 * explicitly; a zero divisor remains a trap (asserted here).
 */
INLINE int64_t
i64_rem(int64_t a, int64_t b)
{
	assert(b);
	if (a == INT64_MIN && b == -1) return 0; /* spec-defined result; UB in C */
	return a % b;
}
|
|
|
|
// float to integer conversion methods
|
|
// In C, float => int conversions always truncate
|
|
// If a int2float(int::min_value) <= float <= int2float(int::max_value), it must always be safe to truncate it
|
|
/**
 * Wasm i32.trunc_f32_u: truncating float -> u32 conversion.
 * (float)UINT32_MAX rounds UP to 2^32, so the old inclusive `<=` bound
 * admitted f == 2^32, whose conversion to uint32_t is undefined behavior
 * in C. Use an exclusive bound against exactly 2^32 instead.
 */
uint32_t
u32_trunc_f32(float f)
{
	assert(0 <= f && f < 0x1p32f);
	return (uint32_t)f;
}
|
|
|
|
/**
 * Wasm i32.trunc_f32_s: truncating float -> i32 conversion.
 * (float)INT32_MAX rounds UP to 2^31, so the old `<=` bound admitted
 * f == 2^31, whose conversion to int32_t is undefined behavior. The lower
 * bound is safe: (float)INT32_MIN is exactly -2^31 and is a valid input.
 */
int32_t
i32_trunc_f32(float f)
{
	assert(INT32_MIN <= f && f < 0x1p31f);
	return (int32_t)f;
}
|
|
|
|
/* Wasm i32.trunc_f64_u: truncating double -> u32 conversion.
 * (double)UINT32_MAX is exact, so the inclusive range check is precise. */
uint32_t
u32_trunc_f64(double f)
{
	assert(0 <= f && f <= (double)UINT32_MAX);

	const uint32_t truncated = (uint32_t)f;
	return truncated;
}
|
|
|
|
/* Wasm i32.trunc_f64_s: truncating double -> i32 conversion.
 * Both (double)INT32_MIN and (double)INT32_MAX are exact, so the inclusive
 * range check is precise. */
int32_t
i32_trunc_f64(double f)
{
	assert(INT32_MIN <= f && f <= (double)INT32_MAX);

	const int32_t truncated = (int32_t)f;
	return truncated;
}
|
|
|
|
/**
 * Wasm i64.trunc_f32_u: truncating float -> u64 conversion.
 * (float)UINT64_MAX rounds UP to 2^64, so the old `<=` bound admitted
 * f == 2^64, whose conversion to uint64_t is undefined behavior.
 * Use an exclusive bound against exactly 2^64.
 */
uint64_t
u64_trunc_f32(float f)
{
	assert(0 <= f && f < 0x1p64f);
	return (uint64_t)f;
}
|
|
|
|
/**
 * Wasm i64.trunc_f32_s: truncating float -> i64 conversion.
 * (float)INT64_MAX rounds UP to 2^63, so the old `<=` bound admitted
 * f == 2^63, whose conversion to int64_t is undefined behavior. The lower
 * bound is safe: (float)INT64_MIN is exactly -2^63 and is a valid input.
 */
int64_t
i64_trunc_f32(float f)
{
	assert(INT64_MIN <= f && f < 0x1p63f);
	return (int64_t)f;
}
|
|
|
|
/**
 * Wasm i64.trunc_f64_u: truncating double -> u64 conversion.
 * (double)UINT64_MAX rounds UP to 2^64, so the old `<=` bound admitted
 * f == 2^64, whose conversion to uint64_t is undefined behavior.
 * Use an exclusive bound against exactly 2^64.
 */
uint64_t
u64_trunc_f64(double f)
{
	assert(0 <= f && f < 0x1p64);
	return (uint64_t)f;
}
|
|
|
|
/**
 * Wasm i64.trunc_f64_s: truncating double -> i64 conversion.
 * (double)INT64_MAX rounds UP to 2^63, so the old `<=` bound admitted
 * f == 2^63, whose conversion to int64_t is undefined behavior. The lower
 * bound is safe: (double)INT64_MIN is exactly -2^63 and is a valid input.
 */
int64_t
i64_trunc_f64(double f)
{
	assert(INT64_MIN <= f && f < 0x1p63);
	return (int64_t)f;
}
|
|
|
|
// Float => Float truncation functions
|
|
INLINE float
|
|
f32_trunc_f32(float f)
|
|
{
|
|
return trunc(f);
|
|
}
|
|
|
|
/**
 * Wasm f32.min.
 * A bare `a < b ? a : b` deviates from the spec in two cases: Wasm requires
 * that NaN propagate (min of anything with NaN is NaN) and that -0.0 order
 * below +0.0. Handle both explicitly before the ordinary comparison.
 */
INLINE float
f32_min(float a, float b)
{
	if (isnan(a) || isnan(b)) return NAN;
	if (a == b) return signbit(a) ? a : b; /* picks -0.0 over +0.0 */
	return a < b ? a : b;
}
|
|
|
|
/**
 * Wasm f32.max.
 * A bare `a > b ? a : b` deviates from the spec in two cases: Wasm requires
 * that NaN propagate and that +0.0 order above -0.0. Handle both explicitly
 * before the ordinary comparison.
 */
INLINE float
f32_max(float a, float b)
{
	if (isnan(a) || isnan(b)) return NAN;
	if (a == b) return signbit(a) ? b : a; /* picks +0.0 over -0.0 */
	return a > b ? a : b;
}
|
|
|
|
INLINE float
|
|
f32_floor(float a)
|
|
{
|
|
return floor(a);
|
|
}
|
|
|
|
/**
 * Wasm f64.min.
 * A bare `a < b ? a : b` deviates from the spec in two cases: Wasm requires
 * that NaN propagate (min of anything with NaN is NaN) and that -0.0 order
 * below +0.0. Handle both explicitly before the ordinary comparison.
 */
INLINE double
f64_min(double a, double b)
{
	if (isnan(a) || isnan(b)) return NAN;
	if (a == b) return signbit(a) ? a : b; /* picks -0.0 over +0.0 */
	return a < b ? a : b;
}
|
|
|
|
/**
 * Wasm f64.max.
 * A bare `a > b ? a : b` deviates from the spec in two cases: Wasm requires
 * that NaN propagate and that +0.0 order above -0.0. Handle both explicitly
 * before the ordinary comparison.
 */
INLINE double
f64_max(double a, double b)
{
	if (isnan(a) || isnan(b)) return NAN;
	if (a == b) return signbit(a) ? b : a; /* picks +0.0 over -0.0 */
	return a > b ? a : b;
}
|
|
|
|
INLINE double
|
|
f64_floor(double a)
|
|
{
|
|
return floor(a);
|
|
}
|