docs: Add more inline docs about buffers

pull/288/head
Sean McBride 4 years ago
parent 7d91a9cfc0
commit fdaff6c666

@@ -56,7 +56,7 @@ perf_window_swap(struct perf_window *perf_window, uint16_t first_by_duration_idx
perf_window->by_termination[second_by_termination_idx] = first_by_duration_idx;
/* Swap by_termination_idx */
struct execution_node tmp_node = perf_window->by_duration[first_by_duration_idx];
perf_window->by_duration[first_by_duration_idx] = perf_window->by_duration[second_by_duration_idx];
perf_window->by_duration[second_by_duration_idx] = tmp_node;
@@ -91,7 +91,7 @@ perf_window_add(struct perf_window *perf_window, uint64_t value)
for (int i = 0; i < PERF_WINDOW_BUFFER_SIZE; i++) {
perf_window->by_termination[i] = i;
perf_window->by_duration[i] = (struct execution_node){ .execution_time = value,
.by_termination_idx = i };
}
perf_window->count = PERF_WINDOW_BUFFER_SIZE;
goto done;
@@ -112,13 +112,17 @@ perf_window_add(struct perf_window *perf_window, uint64_t value)
}
} else {
for (int i = idx_of_oldest;
i - 1 >= 0 && perf_window->by_duration[i - 1].execution_time > perf_window->by_duration[i].execution_time; i--) {
i - 1 >= 0
&& perf_window->by_duration[i - 1].execution_time > perf_window->by_duration[i].execution_time;
i--) {
perf_window_swap(perf_window, i, i - 1);
}
}
/* The idx that we replaced should still point to the same value */
assert(perf_window->by_duration[perf_window->by_termination[perf_window->count % PERF_WINDOW_BUFFER_SIZE]].execution_time == value);
assert(perf_window->by_duration[perf_window->by_termination[perf_window->count % PERF_WINDOW_BUFFER_SIZE]]
.execution_time
== value);
/* The by_duration array should be ordered by execution time */
#ifndef NDEBUG
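The #ifndef NDEBUG guard above, together with the comment that the by_duration array should stay ordered by execution time, points at a debug-only sanity check whose body falls outside this hunk. The sketch below is illustrative only and is not the code from this commit; it assumes the perf_window, execution_node, and PERF_WINDOW_BUFFER_SIZE definitions from this header and that assert.h is available:

/* Illustrative debug check: after the insertion pass above, execution times
 * in by_duration must be non-decreasing. */
static inline void
perf_window_assert_by_duration_sorted(struct perf_window *perf_window)
{
	for (int i = 1; i < PERF_WINDOW_BUFFER_SIZE; i++) {
		assert(perf_window->by_duration[i - 1].execution_time
		       <= perf_window->by_duration[i].execution_time);
	}
}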

@@ -6,7 +6,7 @@
struct vec_u8 {
size_t length;
size_t capacity;
uint8_t *buffer;
uint8_t *buffer; /* Backing heap allocation. Different lifetime because realloc might move this */
};
static inline struct vec_u8 *vec_u8_alloc(void);
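The comment added to buffer above flags that the struct and its backing allocation have different lifetimes because realloc may move the allocation. In practice this means callers must not hold a raw pointer into buffer across any operation that can grow the vector. A minimal sketch of the hazard under standard realloc semantics; the vec_u8_grow helper is hypothetical and is not part of this diff:

#include <stdint.h>
#include <stdlib.h>

struct vec_u8 {
	size_t   length;
	size_t   capacity;
	uint8_t *buffer; /* backing heap allocation; may be moved by realloc */
};

/* Hypothetical growth helper: doubles capacity, possibly moving buffer */
static int
vec_u8_grow(struct vec_u8 *self)
{
	size_t   new_capacity = self->capacity == 0 ? 8 : self->capacity * 2;
	uint8_t *new_buffer   = realloc(self->buffer, new_capacity);
	if (new_buffer == NULL) return -1;

	self->buffer   = new_buffer; /* any pointer into the old buffer is now dangling */
	self->capacity = new_capacity;
	return 0;
}

Any cursor such as uint8_t *p = &v->buffer[i] taken before a growth operation must be recomputed from the possibly updated buffer field afterwards.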

@@ -19,7 +19,7 @@ struct wasm_memory {
size_t size; /* Initial Size in bytes */
size_t capacity; /* Size backed by actual pages */
size_t max; /* Soft cap in bytes. Defaults to 4GB */
uint8_t * buffer;
uint8_t * buffer; /* Backing heap allocation. Different lifetime because realloc might move this */
};
static INLINE struct wasm_memory *wasm_memory_alloc(void);
@@ -134,7 +134,8 @@ wasm_memory_expand(struct wasm_memory *wasm_memory, size_t size_to_expand)
{
size_t target_size = wasm_memory->size + size_to_expand;
if (unlikely(target_size > wasm_memory->max)) {
fprintf(stderr, "wasm_memory_expand - Out of Memory!. %lu out of %lu\n", wasm_memory->size, wasm_memory->max);
fprintf(stderr, "wasm_memory_expand - Out of Memory!. %lu out of %lu\n", wasm_memory->size,
wasm_memory->max);
return -1;
}
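wasm_memory_expand rejects growth past the soft cap here, and the new comment on the buffer field notes that the backing allocation may be moved when the memory actually grows. Callers should therefore re-derive host pointers into linear memory after a successful expansion instead of reusing pointers computed earlier. A sketch of that calling pattern, assuming the struct wasm_memory declarations from this header; the helper names below are illustrative and not part of this codebase:

/* Illustrative only: recompute host pointers after growing linear memory. */
static inline uint8_t *
wasm_memory_host_ptr(struct wasm_memory *wasm_memory, size_t offset)
{
	return wasm_memory->buffer + offset;
}

static inline int
wasm_memory_append_byte(struct wasm_memory *wasm_memory, size_t size_to_expand, uint8_t byte)
{
	size_t old_size = wasm_memory->size;

	if (wasm_memory_expand(wasm_memory, size_to_expand) < 0) return -1;

	/* Re-derive the base pointer; one cached before the call could dangle
	 * if realloc moved the allocation. */
	uint8_t *base  = wasm_memory_host_ptr(wasm_memory, 0);
	base[old_size] = byte; /* first byte of the newly usable region */
	return 0;
}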

@@ -7,12 +7,26 @@
#include "sandbox_types.h"
#include "types.h"
/**
* @brief wasm_stack is a stack used to execute an AOT-compiled WebAssembly instance. It is allocated with a static size
* and a guard page beneath the lowest usable address. Because the stack grows down, this protects against stack
* overflow.
*
* Low Address <---------------------------------------------------------------------------> High Address
* | GUARD PAGE |                  USABLE FOR STACK FRAMES (SIZE of capacity)                            |
* /\             /\                                                                                     /\
* buffer         low                                                                                    high
*
*                                                                   | Frame 2 | Frame 1 | Frame 0 |
*                                                                     <<<<<<< Direction of Stack Growth
*/
struct wasm_stack {
struct ps_list list; /* Linked List Node used for object pool */
size_t capacity; /* Usable capacity. Excludes size of guard page that we need to free */
uint8_t * high; /* The highest address of the stack. Grows down from here */
uint8_t * low; /* The address of the lowest usabe address. Above guard page */
uint8_t * buffer; /* Points to Guard Page */
uint8_t * low; /* The lowest usable address. Above guard page */
uint8_t * buffer; /* Points to the base address of the backing heap allocation (the guard page) */
};
static inline struct wasm_stack *
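The block comment above fixes the layout: buffer is the base of the mapping and points at the guard page, low sits one PAGE_SIZE above buffer, and high equals low plus capacity, with frames growing down from high. A small illustrative check of those relationships (not part of this commit), assuming the struct wasm_stack fields and PAGE_SIZE from this header and that assert.h is available:

/* Illustrative only: the pointer relationships implied by the diagram above. */
static inline void
wasm_stack_assert_layout(const struct wasm_stack *wasm_stack)
{
	assert(wasm_stack->low == wasm_stack->buffer + PAGE_SIZE);
	assert(wasm_stack->high == wasm_stack->low + wasm_stack->capacity);
	/* A live stack pointer must stay within [low, high]; dropping below low
	 * faults on the PROT_NONE guard page instead of silently corrupting memory. */
}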
@@ -36,14 +50,14 @@ wasm_stack_init(struct wasm_stack *wasm_stack, size_t capacity)
int rc = 0;
wasm_stack->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (unlikely(wasm_stack->buffer == MAP_FAILED)) {
perror("sandbox allocate stack");
goto err_stack_allocation_failed;
}
wasm_stack->low = (uint8_t *)mmap(wasm_stack->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
wasm_stack->low = (uint8_t *)mmap(wasm_stack->buffer + /* guard page */ PAGE_SIZE, capacity,
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (unlikely(wasm_stack->low == MAP_FAILED)) {
perror("sandbox set stack read/write");
goto err_stack_prot_failed;
@@ -61,7 +75,7 @@ err_stack_prot_failed:
if (rc == -1) perror("munmap");
err_stack_allocation_failed:
wasm_stack->buffer = NULL;
rc = -1;
goto done;
}
@@ -76,7 +90,7 @@ static struct wasm_stack *
wasm_stack_new(size_t capacity)
{
struct wasm_stack *wasm_stack = wasm_stack_allocate();
int rc = wasm_stack_init(wasm_stack, capacity);
if (rc < 0) {
wasm_stack_free(wasm_stack);
return NULL;

@@ -17,7 +17,7 @@ struct wasm_table_entry {
struct wasm_table {
uint32_t length;
uint32_t capacity;
struct wasm_table_entry *buffer;
struct wasm_table_entry *buffer; /* Backing heap allocation */
};
static INLINE struct wasm_table *wasm_table_alloc(void);
