parent
7e46f58d0f
commit
ef09fcc3f1
@ -1,182 +1,405 @@
|
||||
#include <assert.h>
|
||||
#include <assert.h>
|
||||
#include <math.h>
|
||||
|
||||
#include "types.h"
|
||||
#include "sandbox_context_cache.h"
|
||||
|
||||
uint32_t
|
||||
/* This file contains the stub functions that the aWsm compiler expects
|
||||
* This corresponds to awsm/src/codegen/runtime_stubs.rs
|
||||
* This should be linked with the *.bc file generated by aWsm in order to compile a module as a *.so
|
||||
*/
|
||||
|
||||
extern thread_local struct sandbox_context_cache local_sandbox_context_cache;
|
||||
|
||||
EXPORT void
|
||||
initialize_region(uint32_t offset, uint32_t region_size, uint8_t region[region_size])
|
||||
{
|
||||
wasm_linear_memory_initialize_region(local_sandbox_context_cache.memory, offset, region_size, region);
|
||||
}
|
||||
|
||||
EXPORT uint32_t
|
||||
instruction_memory_size()
|
||||
{
|
||||
return local_sandbox_context_cache.memory.size / WASM_PAGE_SIZE;
|
||||
return wasm_linear_memory_get_page_count(local_sandbox_context_cache.memory);
|
||||
}
|
||||
|
||||
// All of these are pretty generic
|
||||
INLINE float
|
||||
get_f32(uint32_t offset)
|
||||
/**
|
||||
* @brief Stub that implements the WebAssembly memory.grow instruction
|
||||
*
|
||||
* @param count number of pages to grow the WebAssembly linear memory by
|
||||
* @return The previous size of the linear memory in pages or -1 if enough memory cannot be allocated
|
||||
*/
|
||||
EXPORT int32_t
|
||||
instruction_memory_grow(uint32_t count)
|
||||
{
|
||||
assert(offset + sizeof(float) <= local_sandbox_context_cache.memory.size);
|
||||
int rc = local_sandbox_context_cache.memory->size / WASM_PAGE_SIZE;
|
||||
|
||||
/* Return -1 if we've hit the linear memory max */
|
||||
if (unlikely(wasm_linear_memory_expand(local_sandbox_context_cache.memory, WASM_PAGE_SIZE * count) == -1))
|
||||
return -1;
|
||||
|
||||
#ifdef LOG_SANDBOX_MEMORY_PROFILE
|
||||
// Cache the runtime of the first N page allocations
|
||||
for (int i = 0; i < count; i++) {
|
||||
if (likely(sandbox->timestamp_of.page_allocations_size < SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT)) {
|
||||
sandbox->timestamp_of.page_allocations[sandbox->timestamp_of.page_allocations_size++] =
|
||||
sandbox->duration_of_state.running
|
||||
+ (uint32_t)(__getcycles() - sandbox->timestamp_of.last_state_change);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
|
||||
void *address = &mem_as_chars[offset];
|
||||
EXPORT float
|
||||
get_f32(uint32_t offset)
|
||||
{
|
||||
return wasm_linear_memory_get_float(local_sandbox_context_cache.memory, offset);
|
||||
}
|
||||
|
||||
return *(float *)address;
|
||||
EXPORT void
|
||||
set_f32(uint32_t offset, float v)
|
||||
{
|
||||
wasm_linear_memory_set_float(local_sandbox_context_cache.memory, offset, v);
|
||||
}
|
||||
|
||||
INLINE double
|
||||
EXPORT double
|
||||
get_f64(uint32_t offset)
|
||||
{
|
||||
assert(offset + sizeof(double) <= local_sandbox_context_cache.memory.size);
|
||||
|
||||
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
|
||||
void *address = &mem_as_chars[offset];
|
||||
return wasm_linear_memory_get_double(local_sandbox_context_cache.memory, offset);
|
||||
}
|
||||
|
||||
return *(double *)address;
|
||||
EXPORT void
|
||||
set_f64(uint32_t offset, double v)
|
||||
{
|
||||
wasm_linear_memory_set_double(local_sandbox_context_cache.memory, offset, v);
|
||||
}
|
||||
|
||||
INLINE int8_t
|
||||
EXPORT int8_t
|
||||
get_i8(uint32_t offset)
|
||||
{
|
||||
assert(offset + sizeof(int8_t) <= local_sandbox_context_cache.memory.size);
|
||||
|
||||
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
|
||||
void *address = &mem_as_chars[offset];
|
||||
return wasm_linear_memory_get_int8(local_sandbox_context_cache.memory, offset);
|
||||
}
|
||||
|
||||
return *(int8_t *)address;
|
||||
EXPORT void
|
||||
set_i8(uint32_t offset, int8_t v)
|
||||
{
|
||||
wasm_linear_memory_set_int8(local_sandbox_context_cache.memory, offset, v);
|
||||
}
|
||||
|
||||
INLINE int16_t
|
||||
EXPORT int16_t
|
||||
get_i16(uint32_t offset)
|
||||
{
|
||||
assert(offset + sizeof(int16_t) <= local_sandbox_context_cache.memory.size);
|
||||
|
||||
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
|
||||
void *address = &mem_as_chars[offset];
|
||||
return wasm_linear_memory_get_int16(local_sandbox_context_cache.memory, offset);
|
||||
}
|
||||
|
||||
return *(int16_t *)address;
|
||||
EXPORT void
|
||||
set_i16(uint32_t offset, int16_t v)
|
||||
{
|
||||
wasm_linear_memory_set_int16(local_sandbox_context_cache.memory, offset, v);
|
||||
}
|
||||
|
||||
INLINE int32_t
|
||||
EXPORT int32_t
|
||||
get_i32(uint32_t offset)
|
||||
{
|
||||
assert(offset + sizeof(int32_t) <= local_sandbox_context_cache.memory.size);
|
||||
|
||||
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
|
||||
void *address = &mem_as_chars[offset];
|
||||
return wasm_linear_memory_get_int32(local_sandbox_context_cache.memory, offset);
|
||||
}
|
||||
|
||||
return *(int32_t *)address;
|
||||
EXPORT void
|
||||
set_i32(uint32_t offset, int32_t v)
|
||||
{
|
||||
wasm_linear_memory_set_int32(local_sandbox_context_cache.memory, offset, v);
|
||||
}
|
||||
|
||||
INLINE int64_t
|
||||
EXPORT int64_t
|
||||
get_i64(uint32_t offset)
|
||||
{
|
||||
assert(offset + sizeof(int64_t) <= local_sandbox_context_cache.memory.size);
|
||||
return wasm_linear_memory_get_int64(local_sandbox_context_cache.memory, offset);
|
||||
}
|
||||
|
||||
EXPORT void
|
||||
set_i64(uint32_t offset, int64_t v)
|
||||
{
|
||||
wasm_linear_memory_set_int64(local_sandbox_context_cache.memory, offset, v);
|
||||
}
|
||||
|
||||
EXPORT void
|
||||
add_function_to_table(uint32_t idx, uint32_t type_id, char *pointer)
|
||||
{
|
||||
assert(idx < INDIRECT_TABLE_SIZE);
|
||||
assert(local_sandbox_context_cache.module_indirect_table != NULL);
|
||||
|
||||
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
|
||||
void *address = &mem_as_chars[offset];
|
||||
/* TODO: atomic for multiple concurrent invocations? Issue #97 */
|
||||
if (local_sandbox_context_cache.module_indirect_table[idx].type_id == type_id
|
||||
&& local_sandbox_context_cache.module_indirect_table[idx].func_pointer == pointer)
|
||||
return;
|
||||
|
||||
return *(int64_t *)address;
|
||||
local_sandbox_context_cache.module_indirect_table[idx] = (struct indirect_table_entry){
|
||||
.type_id = type_id, .func_pointer = pointer
|
||||
};
|
||||
}
|
||||
|
||||
INLINE int32_t
|
||||
/*
|
||||
* Table handling functionality
|
||||
* This was moved from compiletime in order to place the
|
||||
* function in the callstack in GDB. It can be moved back
|
||||
* to runtime/compiletime/memory/64bit_nix.c to remove the
|
||||
* additional function call
|
||||
*/
|
||||
char *
|
||||
get_function_from_table(uint32_t idx, uint32_t type_id)
|
||||
{
|
||||
#ifdef LOG_FUNCTION_TABLE
|
||||
fprintf(stderr, "get_function_from_table(idx: %u, type_id: %u)\n", idx, type_id);
|
||||
fprintf(stderr, "indirect_table_size: %u\n", INDIRECT_TABLE_SIZE);
|
||||
#endif
|
||||
assert(idx < INDIRECT_TABLE_SIZE);
|
||||
|
||||
struct indirect_table_entry f = local_sandbox_context_cache.module_indirect_table[idx];
|
||||
#ifdef LOG_FUNCTION_TABLE
|
||||
fprintf(stderr, "assumed type: %u, type in table: %u\n", type_id, f.type_id);
|
||||
#endif
|
||||
// FIXME: Commented out function type check because of gocr
|
||||
// assert(f.type_id == type_id);
|
||||
|
||||
assert(f.func_pointer != NULL);
|
||||
|
||||
return f.func_pointer;
|
||||
}
|
||||
|
||||
|
||||
EXPORT int32_t
|
||||
get_global_i32(uint32_t offset)
|
||||
{
|
||||
return get_i32(offset);
|
||||
}
|
||||
|
||||
INLINE int64_t
|
||||
EXPORT void
|
||||
set_global_i32(uint32_t offset, int32_t v)
|
||||
{
|
||||
set_i32(offset, v);
|
||||
}
|
||||
|
||||
EXPORT int64_t
|
||||
get_global_i64(uint32_t offset)
|
||||
{
|
||||
return get_i64(offset);
|
||||
}
|
||||
|
||||
// Now setting routines
|
||||
INLINE void
|
||||
set_f32(uint32_t offset, float v)
|
||||
EXPORT void
|
||||
set_global_i64(uint32_t offset, int64_t v)
|
||||
{
|
||||
assert(offset + sizeof(float) <= local_sandbox_context_cache.memory.size);
|
||||
set_i64(offset, v);
|
||||
}
|
||||
|
||||
/* CHAR_BIT is normally provided by <limits.h>; guard the fallback so we do
 * not redefine a standard macro if that header is (transitively) included. */
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif
|
||||
|
||||
char *mem_as_chars = (char *)local_sandbox_context_cache.memory.start;
|
||||
void *address = &mem_as_chars[offset];
|
||||
// TODO: Throughout here we use `assert` for error conditions, which isn't optimal
|
||||
// Instead we should use `unlikely` branches to a single trapping function (which should optimize better)
|
||||
// The below functions are for implementing WASM instructions
|
||||
|
||||
#ifndef INLINE
#define INLINE static inline /* fallback when types.h has not defined it */
#endif

// ROTL and ROTR helper functions (32-bit).
// WASM defines the rotate count modulo the bit width, so no assert is needed;
// for the power-of-two width 32, `% 32` is the same as `& 31`, so the
// original redundant modulus-then-mask pair is folded into one mask.
// Fix: removed a stray `*(float *)address = v;` line (merge residue of the
// old set_f32) from inside rotl_u32, and the dead old set_f64 prologue above
// rotr_u32.
INLINE uint32_t
rotl_u32(uint32_t n, uint32_t c_u32)
{
	const unsigned int mask = CHAR_BIT * sizeof(n) - 1; /* 31; assumes width is a power of 2 */
	unsigned int       c    = c_u32 & mask;

	return (n << c) | (n >> ((-c) & mask));
}

INLINE uint32_t
rotr_u32(uint32_t n, uint32_t c_u32)
{
	const unsigned int mask = CHAR_BIT * sizeof(n) - 1;
	unsigned int       c    = c_u32 & mask;

	return (n >> c) | (n << ((-c) & mask));
}
|
||||
|
||||
#ifndef INLINE
#define INLINE static inline /* fallback when types.h has not defined it */
#endif

// ROTL and ROTR helper functions (64-bit). Same modulo-width semantics as
// the 32-bit versions; `% 64` folded into `& 63`.
// Fix: removed interleaved residue of the old set_f64/set_i8 implementations
// (`*(double *)address = v;`, bounds assert, pointer setup) from these bodies.
INLINE uint64_t
rotl_u64(uint64_t n, uint64_t c_u64)
{
	const unsigned int mask = CHAR_BIT * sizeof(n) - 1; /* 63; assumes width is a power of 2 */
	unsigned int       c    = c_u64 & mask;

	return (n << c) | (n >> ((-c) & mask));
}

INLINE uint64_t
rotr_u64(uint64_t n, uint64_t c_u64)
{
	const unsigned int mask = CHAR_BIT * sizeof(n) - 1;
	unsigned int       c    = c_u64 & mask;

	return (n >> c) | (n << ((-c) & mask));
}
|
||||
|
||||
#ifndef INLINE
#define INLINE static inline /* fallback when types.h has not defined it */
#endif

// Now safe division and remainder. The caller (generated WASM code) must
// trap before reaching these with b == 0; the assert documents that contract.
// Fix: removed interleaved residue of the old set_i8/set_i16 implementations.
INLINE uint32_t
u32_div(uint32_t a, uint32_t b)
{
	assert(b);
	return a / b;
}

INLINE uint32_t
u32_rem(uint32_t a, uint32_t b)
{
	assert(b);
	return a % b;
}
|
||||
|
||||
#ifndef INLINE
#define INLINE static inline /* fallback when types.h has not defined it */
#endif

// Signed 32-bit division/remainder. The assert also excludes INT32_MIN / -1,
// which overflows (undefined behavior) in C.
// Fix: removed interleaved residue of the old set_i16 implementation.
INLINE int32_t
i32_div(int32_t a, int32_t b)
{
	assert(b && (a != INT32_MIN || b != -1));
	return a / b;
}

INLINE int32_t
i32_rem(int32_t a, int32_t b)
{
	assert(b && (a != INT32_MIN || b != -1));
	return a % b;
}
|
||||
|
||||
#ifndef INLINE
#define INLINE static inline /* fallback when types.h has not defined it */
#endif

// Unsigned 64-bit division/remainder; caller must guarantee b != 0.
// Fix: removed interleaved residue of the old set_i32 implementation.
INLINE uint64_t
u64_div(uint64_t a, uint64_t b)
{
	assert(b);
	return a / b;
}

INLINE uint64_t
u64_rem(uint64_t a, uint64_t b)
{
	assert(b);
	return a % b;
}
|
||||
|
||||
#ifndef INLINE
#define INLINE static inline /* fallback when types.h has not defined it */
#endif

// Signed 64-bit division/remainder; excludes b == 0 and the overflowing
// INT64_MIN / -1 case (undefined behavior in C).
// Fix: removed interleaved residue of the old set_i32/set_i64 implementations.
INLINE int64_t
i64_div(int64_t a, int64_t b)
{
	assert(b && (a != INT64_MIN || b != -1));
	return a / b;
}

INLINE int64_t
i64_rem(int64_t a, int64_t b)
{
	assert(b && (a != INT64_MIN || b != -1));
	return a % b;
}
|
||||
|
||||
// float to integer conversion methods
// In C, float => int conversions always truncate; truncation is only defined
// when the (truncated) value fits the target type.
uint32_t
u32_trunc_f32(float f)
{
	/* Fix: (float)UINT32_MAX rounds UP to 2^32, so the old inclusive bound
	 * admitted f == 2^32, whose conversion is undefined behavior.
	 * Compare strictly against the exactly representable 2^32 instead. */
	assert(0 <= f && f < 4294967296.0f);
	return (uint32_t)f;
}

int32_t
i32_trunc_f32(float f)
{
	/* Lower bound -2^31 is exact in float; upper bound must be strict
	 * because (float)INT32_MAX rounds up to 2^31. */
	assert(-2147483648.0f <= f && f < 2147483648.0f);
	return (int32_t)f;
}
|
||||
|
||||
/* 32-bit truncations from double. Unlike the float variants, UINT32_MAX and
 * INT32_MIN/MAX are exactly representable as doubles, so the inclusive
 * bounds here are safe. Residue of the old set_global_* implementations
 * interleaved by the merge has been removed. */
uint32_t
u32_trunc_f64(double f)
{
	assert(0 <= f && f <= (double)UINT32_MAX);
	return (uint32_t)f;
}

int32_t
i32_trunc_f64(double f)
{
	assert(INT32_MIN <= f && f <= (double)INT32_MAX);
	return (int32_t)f;
}
|
||||
|
||||
/* 64-bit truncations from float. Commented-out remnants of the old table
 * handling code that the merge scattered here have been deleted. */
uint64_t
u64_trunc_f32(float f)
{
	/* Fix: (float)UINT64_MAX rounds UP to 2^64, so the old inclusive bound
	 * admitted an out-of-range value whose conversion is undefined behavior;
	 * compare strictly against the exactly representable 2^64. */
	assert(0 <= f && f < 18446744073709551616.0f);
	return (uint64_t)f;
}

int64_t
i64_trunc_f32(float f)
{
	/* Bounds are +/- 2^63 exactly; the upper bound must be strict because
	 * (float)INT64_MAX rounds up to 2^63. */
	assert(-9223372036854775808.0f <= f && f < 9223372036854775808.0f);
	return (int64_t)f;
}
|
||||
|
||||
/* 64-bit truncations from double. */
uint64_t
u64_trunc_f64(double f)
{
	/* Fix: (double)UINT64_MAX rounds UP to 2^64, making the old inclusive
	 * bound admit an out-of-range value (undefined conversion); compare
	 * strictly against the exactly representable 2^64. */
	assert(0 <= f && f < 18446744073709551616.0);
	return (uint64_t)f;
}

int64_t
i64_trunc_f64(double f)
{
	/* Lower bound -2^63 is exact; upper bound strict because
	 * (double)INT64_MAX rounds up to 2^63. */
	assert(-9223372036854775808.0 <= f && f < 9223372036854775808.0);
	return (int64_t)f;
}
|
||||
|
||||
// Float => Float truncation functions
|
||||
INLINE float
|
||||
f32_trunc_f32(float f)
|
||||
{
|
||||
return trunc(f);
|
||||
}
|
||||
|
||||
INLINE float
|
||||
f32_min(float a, float b)
|
||||
{
|
||||
return a < b ? a : b;
|
||||
}
|
||||
|
||||
// assert(f.func_pointer);
|
||||
INLINE float
|
||||
f32_max(float a, float b)
|
||||
{
|
||||
return a > b ? a : b;
|
||||
}
|
||||
|
||||
// return f.func_pointer;
|
||||
// }
|
||||
INLINE float
|
||||
f32_floor(float a)
|
||||
{
|
||||
return floor(a);
|
||||
}
|
||||
|
||||
INLINE double
|
||||
f64_min(double a, double b)
|
||||
{
|
||||
return a < b ? a : b;
|
||||
}
|
||||
|
||||
INLINE double
|
||||
f64_max(double a, double b)
|
||||
{
|
||||
return a > b ? a : b;
|
||||
}
|
||||
|
||||
INLINE double
|
||||
f64_floor(double a)
|
||||
{
|
||||
return floor(a);
|
||||
}
|
||||
|
@ -0,0 +1,11 @@
|
||||
#pragma once

#include "wasm_linear_memory.h"
#include "wasm_indirect_table.h"

/* Cache of Frequently Accessed Members used to avoid pointer chasing */
struct sandbox_context_cache {
	struct wasm_linear_memory *  memory;
	struct indirect_table_entry *module_indirect_table;
};

/* One cache per worker thread, pointing at the currently running sandbox */
extern thread_local struct sandbox_context_cache local_sandbox_context_cache;
|
@ -0,0 +1,11 @@
|
||||
#pragma once

#include <stdint.h>

/* memory also provides the table access functions */
#define INDIRECT_TABLE_SIZE (1 << 10)

/* One slot of the indirect call table: a type tag plus the native pointer */
struct indirect_table_entry {
	uint32_t type_id;
	void *   func_pointer;
};
|
@ -1,103 +0,0 @@
|
||||
#include "current_sandbox.h"
|
||||
#include "panic.h"
|
||||
#include "runtime.h"
|
||||
#include "sandbox_types.h"
|
||||
#include "types.h"
|
||||
|
||||
#include <sys/mman.h>
|
||||
|
||||
/**
|
||||
* @brief Expand the linear memory of the active WebAssembly sandbox by a single page
|
||||
*
|
||||
* @return int
|
||||
*/
|
||||
int
|
||||
expand_memory(void)
|
||||
{
|
||||
struct sandbox *sandbox = current_sandbox_get();
|
||||
|
||||
assert(sandbox->state == SANDBOX_RUNNING_USER || sandbox->state == SANDBOX_RUNNING_SYS);
|
||||
assert(local_sandbox_context_cache.memory->size % WASM_PAGE_SIZE == 0);
|
||||
|
||||
/* Return -1 if we've hit the linear memory max */
|
||||
if (unlikely(wasm_linear_memory_expand(local_sandbox_context_cache.memory, WASM_PAGE_SIZE) == -1)) return -1;
|
||||
|
||||
#ifdef LOG_SANDBOX_MEMORY_PROFILE
|
||||
// Cache the runtime of the first N page allocations
|
||||
if (likely(sandbox->timestamp_of.page_allocations_size < SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT)) {
|
||||
sandbox->timestamp_of.page_allocations[sandbox->timestamp_of.page_allocations_size++] =
|
||||
sandbox->duration_of_state.running
|
||||
+ (uint32_t)(__getcycles() - sandbox->timestamp_of.last_state_change);
|
||||
}
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
INLINE char *
|
||||
get_memory_ptr_for_runtime(uint32_t offset, uint32_t size)
|
||||
{
|
||||
return (char *)wasm_linear_memory_get_ptr_void(local_sandbox_context_cache.memory, offset, size);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Stub that implements the WebAssembly memory.grow instruction
|
||||
*
|
||||
* @param count number of pages to grow the WebAssembly linear memory by
|
||||
* @return The previous size of the linear memory in pages or -1 if enough memory cannot be allocated
|
||||
*/
|
||||
int32_t
|
||||
instruction_memory_grow(uint32_t count)
|
||||
{
|
||||
int rc = local_sandbox_context_cache.memory->size / WASM_PAGE_SIZE;
|
||||
|
||||
struct sandbox *sandbox = current_sandbox_get();
|
||||
|
||||
assert(sandbox->state == SANDBOX_RUNNING_USER || sandbox->state == SANDBOX_RUNNING_SYS);
|
||||
assert(local_sandbox_context_cache.memory->size % WASM_PAGE_SIZE == 0);
|
||||
|
||||
/* Return -1 if we've hit the linear memory max */
|
||||
if (unlikely(wasm_linear_memory_expand(local_sandbox_context_cache.memory, WASM_PAGE_SIZE * count) == -1))
|
||||
return -1;
|
||||
|
||||
#ifdef LOG_SANDBOX_MEMORY_PROFILE
|
||||
// Cache the runtime of the first N page allocations
|
||||
for (int i = 0; i < count; i++) {
|
||||
if (likely(sandbox->timestamp_of.page_allocations_size < SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT)) {
|
||||
sandbox->timestamp_of.page_allocations[sandbox->timestamp_of.page_allocations_size++] =
|
||||
sandbox->duration_of_state.running
|
||||
+ (uint32_t)(__getcycles() - sandbox->timestamp_of.last_state_change);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return local_sandbox_context_cache.memory->size / WASM_PAGE_SIZE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Table handling functionality
|
||||
* This was moved from compiletime in order to place the
|
||||
* function in the callstack in GDB. It can be moved back
|
||||
* to runtime/compiletime/memory/64bit_nix.c to remove the
|
||||
* additional function call
|
||||
*/
|
||||
char *
|
||||
get_function_from_table(uint32_t idx, uint32_t type_id)
|
||||
{
|
||||
#ifdef LOG_FUNCTION_TABLE
|
||||
fprintf(stderr, "get_function_from_table(idx: %u, type_id: %u)\n", idx, type_id);
|
||||
fprintf(stderr, "indirect_table_size: %u\n", INDIRECT_TABLE_SIZE);
|
||||
#endif
|
||||
assert(idx < INDIRECT_TABLE_SIZE);
|
||||
|
||||
struct indirect_table_entry f = local_sandbox_context_cache.module_indirect_table[idx];
|
||||
#ifdef LOG_FUNCTION_TABLE
|
||||
fprintf(stderr, "assumed type: %u, type in table: %u\n", type_id, f.type_id);
|
||||
#endif
|
||||
// FIXME: Commented out function type check because of gocr
|
||||
// assert(f.type_id == type_id);
|
||||
|
||||
assert(f.func_pointer != NULL);
|
||||
|
||||
return f.func_pointer;
|
||||
}
|
@ -1,37 +0,0 @@
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "runtime.h"
|
||||
#include "types.h"
|
||||
|
||||
/* Region initialization helper function */
|
||||
EXPORT void
|
||||
initialize_region(uint32_t offset, uint32_t region_size, uint8_t region[region_size])
|
||||
{
|
||||
assert((size_t)offset + region_size < local_sandbox_context_cache.memory->size);
|
||||
|
||||
memcpy(get_memory_ptr_for_runtime(offset, region_size), region, region_size);
|
||||
}
|
||||
|
||||
void
|
||||
add_function_to_table(uint32_t idx, uint32_t type_id, char *pointer)
|
||||
{
|
||||
assert(idx < INDIRECT_TABLE_SIZE);
|
||||
assert(local_sandbox_context_cache.module_indirect_table != NULL);
|
||||
|
||||
/* TODO: atomic for multiple concurrent invocations? Issue #97 */
|
||||
if (local_sandbox_context_cache.module_indirect_table[idx].type_id == type_id
|
||||
&& local_sandbox_context_cache.module_indirect_table[idx].func_pointer == pointer)
|
||||
return;
|
||||
|
||||
local_sandbox_context_cache.module_indirect_table[idx] = (struct indirect_table_entry){
|
||||
.type_id = type_id, .func_pointer = pointer
|
||||
};
|
||||
}
|
||||
|
||||
/* If we are using runtime globals, we need to populate them */
|
||||
WEAK void
|
||||
populate_globals()
|
||||
{
|
||||
assert(0); /* FIXME: is this used in WASM as dynamic modules? Issue #105. */
|
||||
}
|
Loading…
Reference in new issue